diff --git a/.circleci/config.yml b/.circleci/config.yml index 69b7a43726..2cefa567c5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,33 +2,79 @@ # # Check https://circleci.com/docs/2.0/language-python/ for more details # - -version: 2 - workflows: version: 2 build: jobs: - - docs-build + - test + - docs-build: + filters: + branches: + ignore: master - docs-build-deploy: filters: + tags: + only: /^v.*/ branches: - only: main + ignore: /.*/ +version: 2 jobs: + test: + docker: + - image: circleci/python:3.7 + + working_directory: ~/repo + + steps: + - checkout + + # Download and cache dependencies + - restore_cache: + keys: + - v1-dependencies-python3.7-{{ checksum "requirements.txt" }} + # fallback to using the latest cache if no exact match is found + - v1-dependencies-python3.7- + + - run: + name: install dependencies + command: | + python3 -m venv venv + . venv/bin/activate + pip install -r requirements.txt + + - save_cache: + paths: + - ./venv + key: v1-dependencies-python3.7-{{ checksum "requirements.txt" }} + + - run: + name: run tests + command: | + . venv/bin/activate + TESTING=1 pytest -rf test/ + + - run: + name: check linting + command: | + . venv/bin/activate + flake8 parsons/ test/ useful_resources/ + + - store_artifacts: + path: test-reports + destination: test-reports - # Documentation CI docs-build: docker: - - image: cimg/python:3.10 + - image: circleci/python:3.7 steps: - checkout # Download and cache dependencies - restore_cache: keys: - - v2-dependencies-python3.10-{{ checksum "./docs/requirements.txt" }} + - v1-dependencies-python3.7-{{ checksum "./docs/requirements.txt" }} # fallback to using the latest cache if no exact match is found - - v2-dependencies-python3.10- + - v1-dependencies-python3.7- - run: name: Install dependencies # Note that the circleci node image installs stuff with a user "circleci", rather @@ -40,26 +86,24 @@ jobs: - save_cache: paths: - ./venv - key: v2-dependencies-python3.10-{{ checksum "./docs/requirements.txt" }} + key: v1-dependencies-python3.7-{{ checksum "./docs/requirements.txt" }} - run: name: Build docs command: | . venv/bin/activate - cd docs/ - make deploy_docs - cd .. + cd docs/ && make html && cd .. docs-build-deploy: docker: - - image: cimg/python:3.10-node + - image: circleci/python:3.7-node steps: - checkout # Download and cache dependencies - restore_cache: keys: - - v2-dependencies-python3.10-{{ checksum "./docs/requirements.txt" }} + - v1-dependencies-python3.7-{{ checksum "./docs/requirements.txt" }} # fallback to using the latest cache if no exact match is found - - v2-dependencies-python3.10- + - v1-dependencies-python3.7- - run: name: Install dependencies # Note that the circleci node image installs stuff with a user "circleci", rather @@ -73,12 +117,12 @@ jobs: - save_cache: paths: - ./venv - key: v2-dependencies-python3.10-{{ checksum "./docs/requirements.txt" }} + key: v1-dependencies-python3.7-{{ checksum "./docs/requirements.txt" }} - add_ssh_keys: # This SSH key is "CircleCI Docs" in https://github.com/move-coop/parsons/settings/keys # We need write access to the Parsons repo, so we can push the "gh-pages" branch. fingerprints: - - '9a:ec:4d:2b:c3:45:b2:f5:55:ca:0b:2b:36:e2:7f:df' + - 'a6:b1:ec:19:86:19:8b:98:1e:b1:41:b2:e1:4a:4f:3d' - run: name: Build and deploy docs # When running gh-pages, we specify to include dotfiles, so we pick up the .nojekyll file. # that start with an underscore like _static/). command: | . 
venv/bin/activate - cd docs/ - make deploy_docs - cd .. + cd docs/ && make html && cd .. git config user.email "ci-build@movementcooperative.org" git config user.name "ci-build" export PATH=/home/circleci/npm/bin:$PATH diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index 061924e91f..0000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: Bug Report -about: Tell us about a problem with the project's current functionality -title: "[Bug]" -labels: bug -assignees: '' ---- - - - - -## Detailed Description - - - -## To Reproduce - - - -## Your Environment - -* Version of Parsons used (if you know): -* Environment name and version (e.g. Chrome 39, node.js 5.4): -* Operating System and version (desktop or mobile): - - -## Additional Context -Add any other context about the problem here. - - -## Priority -Please indicate whether fixing this bug is high, medium, or low priority for you. If the issue is time-sensitive for you, please let us know when you need it addressed by. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/proposed-feature-addition.md b/.github/ISSUE_TEMPLATE/proposed-feature-addition.md deleted file mode 100644 index 32a9c48948..0000000000 --- a/.github/ISSUE_TEMPLATE/proposed-feature-addition.md +++ /dev/null @@ -1,26 +0,0 @@ ---- -name: Proposed Feature/Addition -about: Suggest an addition to Parsons -title: "[Feature/Addition]" -labels: enhancement -assignees: '' ---- - - - - -## Detailed Description - - - -## Context - - - - -## Possible Implementation - - - -## Priority - \ No newline at end of file diff --git a/.github/release.yml b/.github/release.yml deleted file mode 100644 index d540f4c52f..0000000000 --- a/.github/release.yml +++ /dev/null @@ -1,22 +0,0 @@ -changelog: - categories: - - title: New Features - labels: - - connector-update - - new-connector - - parsons-core - - title: Automated Testing - labels: - - testing - - title: Bug Fixes - labels: - - bug-fix - - title: Documentation - labels: - - documentation - # - title: New Contributors - # labels: - # -🎉-first-PR - - title: Other Changes - labels: - - "*" \ No newline at end of file diff --git a/.github/workflows/test-linux-windows.yml b/.github/workflows/test-linux-windows.yml deleted file mode 100644 index 2143f3eadd..0000000000 --- a/.github/workflows/test-linux-windows.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: tests - -on: - pull_request: - branches: ["main"] - push: - branches: ["main"] - -env: - TESTING: 1 - -jobs: - build: - strategy: - matrix: - python-version: ['3.8', '3.9', '3.10'] - os: [ubuntu-latest] # add in windows-latest to add windows testing - include: - - os: ubuntu-latest - path: ~/.cache/pip -# - os: windows-latest -# path: ~\AppData\Local\pip\Cache - runs-on: ${{ matrix.os }} - - steps: - - - uses: actions/checkout@v3 - - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - uses: actions/cache@v3 - with: - path: ${{ matrix.path }} - key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} - restore-keys: | - ${{ runner.os }}-pip- - - - name: Install dependencies - run: python -m pip install -r requirements.txt - - - name: Run tests - run: pytest -rf test/ - - - name: check linting - run: | - # E203 and W503 don't work well with black - flake8 parsons/ test/ useful_resources/ --extend-ignore=E203,W503 - black --check parsons/ test/ useful_resources/ diff --git 
a/.github/workflows/tests-mac.yml b/.github/workflows/tests-mac.yml deleted file mode 100644 index 01612343e7..0000000000 --- a/.github/workflows/tests-mac.yml +++ /dev/null @@ -1,43 +0,0 @@ -name: tests for mac -# test mac on single python version as mac tests use 10x minutes/storage - -on: - pull_request: - branches: ["main"] - push: - branches: ["main"] - -env: - TESTING: 1 - -jobs: - build: - runs-on: macos-latest - - steps: - - - uses: actions/checkout@v3 - - - name: Set up Python 3.8 - uses: actions/setup-python@v4 - with: - python-version: 3.8 - - - uses: actions/cache@v3 - with: - path: ~/Library/Caches/pip - key: mac-pip-${{ hashFiles('**/requirements.txt') }} - restore-keys: | - mac-pip- - - - name: Install dependencies - run: python -m pip install -r requirements.txt - - - name: Run tests - run: TESTING=1 pytest -rf test/ - - - name: check linting - run: | - # E203 and W503 don't work well with black - flake8 parsons/ test/ useful_resources/ --extend-ignore=E203,W503 - black --check parsons/ test/ useful_resources/ diff --git a/.gitignore b/.gitignore index 8b1ee6b888..d9bd874db4 100644 --- a/.gitignore +++ b/.gitignore @@ -64,7 +64,7 @@ instance/ .scrapy # Sphinx documentation -docs/html +#docs/_build/ # PyBuilder target/ @@ -124,4 +124,3 @@ bill_com_credentials.* docs/html docs/dirhtml -*.sw* diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 7376c5aca1..0000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -repos: - - repo: https://github.com/pycqa/flake8 - rev: 6.1.0 - hooks: - - id: flake8 - language_version: python3 - args: [ - '--extend-ignore=E203,W503', - '--max-line-length=100' - ] - - repo: https://github.com/psf/black - rev: 22.3.0 - hooks: - - id: black - language_version: python3 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 187a5a727c..a2ae6a58d3 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -55,11 +55,11 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting hr@movementcooperative.org. All complaints will be reviewed -and investigated and will result in a response that is deemed necessary and -appropriate to the circumstances. The project team is obligated to maintain -confidentiality with regard to the reporter of an incident. Further details -of specific enforcement policies may be posted separately. +reported by contacting the project team at justin@movementcooperative.org. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e34f0872c2..5e71751430 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,15 +1,159 @@ We're thrilled that you're thinking about contributing to Parsons! Welcome to our contributor community. -You can find a detailed version of this guide [on our website](https://www.parsonsproject.org/pub/contributing-guide/). +Here are some ways you can contribute: -The best way to get involved is by joining our Slack. 
To join, email engineering@movementcooperative.org. In addition to all the great discussions that happen on our Slack, we also have virtual events including trainings, pairing sessions, social hangouts, discussions, and more. Every other Thursday afternoon we host 🎉 Parsons Parties 🎉 on Zoom where we work on contributions together. +* [submit an issue](#submitting-issues) +* [contribute code](#contributing-code-to-parsons) +* [contribute documentation](#documentation) +* [add sample code to our library of examples](#contributing-sample-code) -You can contribute by: +Every other Thursday afternoon we host 🎉 Parsons Parties 🎉 on Zoom where we work on contributions together. Reach out if you'd like to join - it's a great way to get involved. -* [submitting issues](https://www.parsonsproject.org/pub/contributing-guide#submitting-issues) -* [contributing code](https://www.parsonsproject.org/pub/contributing-guide/) -* [updating our documentation](https://www.parsonsproject.org/pub/updating-documentation/) -* [teaching and mentoring](https://www.parsonsproject.org/pub/contributing-guide#teaching-and-mentoring) -* [helping "triage" issues and review pull requests](https://www.parsonsproject.org/pub/contributing-guide#maintainer-tasks) +## Submitting Issues -If you're not sure how to get started, please ask for help! We're happy to chat and help you find the best way to get involved. \ No newline at end of file +We encourage folks to review existing issues before starting a new issue. + +* If the issue you want already exists, feel free to use the *thumbs up* emoji to upvote the issue. +* If you have additional documentation or context that would be helpful, please add it using comments. +* If you have code snippets, but don’t have time to do the full write-up, please add them to the issue! + +We use labels to help us classify issues. They include: +* **bug** - something in Parsons isn’t working the way it should +* **enhancement** - new feature or request (e.g. a new API connector) +* **good first issue** - an issue that would be good for someone who is new to Parsons + +## Contributing Code to Parsons + +Generally, code contributions to Parsons will be either enhancements or bug fixes (or contributions of [sample code](#sample-code), discussed below). All changes to the repository are made [via pull requests](#submitting-a-pull-request). + +If you would like to contribute code to Parsons, please review the issues in the repository and find one you would like to work on. If you are new to Parsons or to open source projects, look for issues with the [**good first issue**](https://github.com/move-coop/parsons/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) label. Once you have found your issue, please add a comment to the issue that lets others know that you are interested in working on it. If you're having trouble finding something to work on, please ask us for help on Slack. + +The bulk of Parsons is made up of Connector classes, which are Python classes that help move data in and out of third party services. When you feel ready, you may want to contribute by [adding a new Connector class](https://move-coop.github.io/parsons/html/build_a_connector.html). + +### Making Changes to Parsons + +To make code changes to Parsons, you'll need to set up your development environment, make your changes, and then submit a pull request. 
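The sections below walk through each step in detail. In condensed form, the whole loop looks something like this (a sketch; `<your-username>` is a hypothetical placeholder for your own GitHub account):

```bash
# Fork move-coop/parsons on GitHub first, then:
git clone https://github.com/<your-username>/parsons.git
cd parsons
python3 -m venv .venv && source .venv/bin/activate  # optional, see Virtual Environments below
pip install -r requirements.txt
pytest -rf test/                        # run the unit tests
flake8 --max-line-length=100 parsons    # run the linter
```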
+ +To set up your development environment: + +* Fork the Parsons project using [the “Fork” button in GitHub](https://guides.github.com/activities/forking/) +* Clone your fork to your local computer +* Set up a [virtual environment](#virtual-environments) +* Install the [dependencies](#installing-dependencies) +* Check that everything's working by [running the unit tests](#unit-tests) and the [linter](#linting) + +Now it's time to make your changes. We suggest taking a quick look at our [coding conventions](#coding-conventions) - it'll make the review process easier down the line. In addition to any code changes, make sure to update the documentation and the unit tests if necessary. Not sure if your changes require test or documentation updates? Just ask in Slack or through a comment on the relevant issue. When you're done, make sure to run the [unit tests](#unit-tests) and the [linter](#linting) again. + +Finally, you'll want to [submit a pull request](#submitting-a-pull-request). And that's it! + +#### Virtual Environments + +If required dependencies conflict with packages or modules you need for other projects, you can create and use a [virtual environment](https://docs.python.org/3/library/venv.html). + +``` +python3 -m venv .venv # Creates a virtual environment in the .venv folder +source .venv/bin/activate # Activate in Unix or macOS +.venv/Scripts/activate.bat # Activate in Windows +``` + +#### Installing Dependencies + +Before running or testing your code changes, be sure to install all of the required Python libraries that Parsons depends on. + +From the root of the Parsons repository, run the following command: + +```bash +> pip install -r requirements.txt +``` + +#### Unit Tests + +When contributing code, we ask you to add tests that can be used to verify that the code is working as expected. All of our unit tests are located in the `test/` folder at the root of the repository. + +We use the pytest tool to run our suite of automated unit tests. The pytest command line tool is installed as part of the Parsons dependencies. + +To run the entire suite of unit tests, execute the following command: + +```bash +> pytest -rf test/ +``` + +Once the pytest tool has finished running all of the tests, it will output details around any errors or test failures it encountered. If no failures are identified, then you are good to go! + +**Note:** Some tests are written to call out to external APIs, and will be skipped as part of standard unit testing. This is expected. + +See the [pytest documentation](https://docs.pytest.org/en/latest/contents.html) for more info and many more options. + +#### Linting + +We use the [flake8](http://flake8.pycqa.org/en/latest/) tool to [lint](https://en.wikipedia.org/wiki/Lint_(software)) the code in the repository to make sure it matches our preferred style. The flake8 command line tool is installed as part of the Parsons dependencies. + +Run the following command from the root of the Parsons repository to lint your code changes: + +```bash +> flake8 --max-line-length=100 parsons +``` + +#### Coding Conventions + +The following is a list of best practices to consider when writing code for the Parsons project: + +* Each tool connector should be its own unique class (e.g. ActionKit, VAN) in its own Python package. Use existing connectors as examples when deciding how to lay out your code. + +* Methods should be named using a verb_noun structure, such as `get_activist()` or `update_event()`. 
+ +* Methods should reflect the vocabulary utilized by the original tool where possible to maintain transparency. For example, Google Cloud Storage refers to file-like objects as blobs. The methods are called `get_blob()` rather than `get_file()`. + +* Methods that can work with arbitrarily large data (e.g. database or API queries) should use Parsons Tables to hold the data instead of standard Python collections (e.g. lists, dicts). + +* You should avoid abbreviations for method names and variable names where possible. + +* Inline comments explaining complex code and methods are appreciated. + +* Capitalize the word Parsons for consistency where possible, especially in documentation. + +If you are building a new connector or extending an existing connector, there are more best practices in the [How to Build a Connector](https://move-coop.github.io/parsons/html/build_a_connector.html) documentation. + +## Documentation + +Parsons documentation is built using the Python Sphinx tool. Sphinx uses the `docs/*.rst` files in the repository to create the documentation. + +We have a [documentation label](https://github.com/move-coop/parsons/issues?q=is%3Aissue+is%3Aopen+label%3Adocumentation) that may help you find good docs issues to work on. If you are adding a new connector, you will need to add a reference to the connector to one of the .rst files. Please use the existing documentation as an example. + +When editing documentation, make sure you are editing the source files (with .md or .rst extension) and not the build files (.html extension). + +The workflow for documentation changes is a bit simpler than for code changes: + +* Fork the Parsons project using [the “Fork” button in GitHub](https://guides.github.com/activities/forking/) +* Clone your fork to your local computer +* Change into the `docs` folder and install the requirements with `pip install -r requirements.txt` (you may want to set up a [virtual environment](#virtual-environments) first) +* Make your changes and re-build the docs by running `make html`. +* Open the built HTML files in your web browser to check that they look as you expect. +* [Submit a pull request](#submitting-a-pull-request) + +When you make documentation changes, you only need to track the source files with git. The built docs in the html folder should not be included. + +You should not need to worry about the unit tests or the linter if you are making documentation changes only. + +## Contributing Sample Code + +One important way to contribute to the Parsons project is to submit sample code that provides recipes and patterns for how to use the Parsons library. + +We have a folder called `useful_resources/` in the root of the repository. If you have scripts that incorporate Parsons, we encourage you to add them there! + +The workflow for adding sample code is: + +* Fork the Parsons project using [the “Fork” button in GitHub](https://guides.github.com/activities/forking/) +* Clone your fork to your local computer +* Add your sample code into the `useful_resources/` folder +* [Submit a pull request](#submitting-a-pull-request) + +You should not need to worry about the unit tests or the linter if you are only adding sample code. + +## Submitting a Pull Request + +To submit a pull request, follow [these instructions to create a Pull Request from your fork](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) back to the original Parsons repository. 
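On the git side, that flow usually looks something like this (a sketch; the branch name is illustrative):

```bash
git checkout -b my-fix-branch         # create a branch for your change
git add . && git commit -m "Describe your change"
git push origin my-fix-branch         # push to your fork, then open the PR on GitHub
```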
+ +The Parsons team will review your pull request and provide feedback. Please feel free to ping us if no one's responded to your Pull Request after a few days. We may not be able to review it right away, but we should be able to tell you when we'll get to it. + +Once your pull request has been approved, the Parsons team will merge your changes into the Parsons repository diff --git a/Dockerfile b/Dockerfile index 7fdd250950..1f885f4781 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=linux/amd64 python:3.7 +FROM python:3.7 #################### ## Selenium setup ## diff --git a/README.md b/README.md index c89506083c..a084cf53ab 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ [![Downloads](https://pepy.tech/badge/parsons)](https://pepy.tech/project/parsons) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/parsons)](https://pypi.org/project/parsons/) [![PyPI](https://img.shields.io/pypi/v/parsons?color=blue)](https://pypi.org/project/parsons/) -[![CircleCI](https://circleci.com/gh/move-coop/parsons/tree/main.svg?style=shield)](https://circleci.com/gh/move-coop/parsons/tree/main) +[![CircleCI](https://circleci.com/gh/move-coop/parsons/tree/master.svg?style=shield)](https://circleci.com/gh/move-coop/parsons/tree/master) A Python package that provides a simple interface to a variety of utilities and tools frequently used by progressive organizations, political and issue campaigns, activists, and other allied actors. @@ -10,36 +10,29 @@ Parsons offers simplified interactions with these services and tools, including This project is maintained by [The Movement Cooperative](https://movementcooperative.org/) and is named after [Lucy Parsons](https://en.wikipedia.org/wiki/Lucy_Parsons). The Movement Cooperative is a member-led organization focused on providing data, tools, and strategic support for the progressive community. -Parsons is only supported for Python 3.8-10. +Parsons is only compatible with Python 3.6/7/8 -## Table of Contents -- [License and Usage](#license-and-usage) -- [Documentation](#documentation) -- [Installation](#installation) -- [Quickstart](#quickstart) -- [Community](#community) +### License and Usage +Usage of Parsons is governed by the [TMC Parsons License](https://github.com/move-coop/parsons/blob/master/LICENSE.md), which allows for unlimited non-commercial usage, provided that individuals and organizations adhere to our broad values statement. -## License and Usage -Usage of Parsons is governed by a [modified Apache License with author attribution statement](https://github.com/move-coop/parsons/blob/main/LICENSE.md). - -## Documentation +### Documentation To gain a full understanding of all of the features of Parsons, please review the Parsons [documentation](https://move-coop.github.io/parsons/html/index.html). -## Installation +### Installation -### PYPI -You can install the most recent release by running: `pip install parsons[all]` +#### PYPI +You can install the most recent release by running: `pip install parsons` -### Install from Github +#### Install from Github To access the most recent code base that may contain features not yet included in the latest release, download this repository and then run `python setup.py develop`. -### Docker Container -We have a Parsons Docker container hosted on [DockerHub](https://hub.docker.com/r/movementcooperative/parsons) for each release of Parsons, including the `latest`. 
+#### Docker Container +We have a Parsons Docker container hosted on [DockerHub](https://cloud.docker.com/u/movementcooperative/repository/docker/movementcooperative/parsons) for each release of Parsons, including the `latest`. -## Quickstart +### Quickstart For this Quickstart, we are looking to generate a list of voters with cell phones using a [dummy data file](docs/quickstart.csv). We use the `assert` statements to verify that the data has been loaded correctly. @@ -69,7 +62,7 @@ sheet_id = sheets.create_spreadsheet('Voter Cell Phones') sheets.append_to_sheet(sheet_id, people_with_cell_phones) ``` -## Community -We hope to foster a strong and robust community of individuals who use and contribute to further development. Individuals are encouraged to submit issues with bugs, suggestions and feature requests. [Here](https://github.com/move-coop/parsons/blob/main/CONTRIBUTING.md) are the guidelines and best practices for contributing to Parsons. +### Community +We hope to foster a strong and robust community of individuals who use and contribute to further development. Individuals are encouraged to submit issues with bugs, suggestions and feature requests. [Here](https://github.com/move-coop/parsons/blob/master/CONTRIBUTING.md) are the guidelines and best practices for contributing to Parsons. You can also stay up to date by joining the Parsons Slack group, an active community of Parsons contributors and progressive data engineers. For an invite, just reach out to engineering+parsons@movementcooperative.org! diff --git a/docs/Makefile b/docs/Makefile index 8735959ed9..f2124180fb 100755 --- a/docs/Makefile +++ b/docs/Makefile @@ -12,12 +12,6 @@ BUILDDIR = . help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -deploy_docs: - git branch latest - git branch stable $$(git tag -l --sort=creatordate | tail -1) - sphinx-multiversion . html - cp ./index-redirect.html html/index.html - .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new diff --git a/docs/_static/images/civis_container_script.png b/docs/_static/images/civis_container_script.png deleted file mode 100644 index f852346be3..0000000000 Binary files a/docs/_static/images/civis_container_script.png and /dev/null differ diff --git a/docs/_static/images/civis_etl_workflow.png b/docs/_static/images/civis_etl_workflow.png deleted file mode 100644 index cbc70bae6d..0000000000 Binary files a/docs/_static/images/civis_etl_workflow.png and /dev/null differ diff --git a/docs/_static/images/civis_mobilize_import.png b/docs/_static/images/civis_mobilize_import.png deleted file mode 100644 index a9b4432ef7..0000000000 Binary files a/docs/_static/images/civis_mobilize_import.png and /dev/null differ diff --git a/docs/_static/parsons_logo.png b/docs/_static/parsons_logo.png deleted file mode 100644 index aafabd6a5a..0000000000 Binary files a/docs/_static/parsons_logo.png and /dev/null differ diff --git a/docs/_templates/versions.html b/docs/_templates/versions.html deleted file mode 100644 index bc20f16769..0000000000 --- a/docs/_templates/versions.html +++ /dev/null @@ -1,29 +0,0 @@ -{% if display_versions_lower_left and current_version %} -
- - Read the Docs - {{ current_version.name }} - - -
-
-
{{ _('Most Common') }}
- - {% for version in versions %} - {% if version.name|first != "v" %} -
{{ version.name }}
- {% endif %} - {% endfor %} - -
{{ _('By Version Number') }}
- - {% for version in versions %} - {% if version.name|first == "v" %} -
{{ version.name }}
- {% endif %} - {% endfor %} - -
-
-
-{% endif %} \ No newline at end of file diff --git a/docs/action_builder.rst b/docs/action_builder.rst deleted file mode 100644 index e6d328ede4..0000000000 --- a/docs/action_builder.rst +++ /dev/null @@ -1,88 +0,0 @@ -Action Builder -========== - -******** -Overview -******** - -`Action Builder `_ is an online tool for field organizing, with an -original use-case designed for the Labor context. While it has essentially no built-in outreach -capabilities, it does provide robust record and relationship storage, including the ability to -create custom record types. For more information, see -`Action Builder developer docs `_ - -.. note:: - Custom Fields/Tags - Action Builder custom fields are treated as tags in both the SQL Mirror, and the API. This - means that, with a couple exceptions such as date, values must be created ahead of time to be - applied to a record. Each tag has two layers of taxonomy above it as well, that appear slightly - differently in the SQL Mirror and in the API. In the SQL Mirror, each tag has a - ``tag_category``, and each category has a ``tag_group``. In the API, the equivalents are called - ``tag_field`` and ``tag_section``, respectively (closer to the naming in the UI). Tags can be - applied on Connections as well as on Entities. - -*********** -Quick Start -*********** - -To instantiate a class, you can either pass in the API token as an argument or set the -``ACTION_BUILDER_API_TOKEN`` environmental variable. The subdomain at which you access the UI must -also be provided. If all calls from this object will be to the same Campaign in Action Builder, -an optional campaign argument may also be supplied. If not supplied when instantiating, campaign -may be passed to individual methods, instead. - -.. code-block:: python - - from parsons import ActionBuilder - - # First approach: Use API credentials via environmental variables - bldr = ActionBuilder(subdomain='yourorgsubdomain') - - # Second approach: Pass API credentials as arguments - bldr = ActionBuilder(api_token='MY_API_TOKEN', subdomain='yourorgsubdomain') - - # Third approach: Include campaign argument - bldr = ActionBuilder( - api_token = 'MY_API_TOKEN', - subdomain = 'yourorgsubdomain', - campaign = 'your-desired-campaign-id' - ) - -You can then call various endpoints: - -.. code-block:: python - # Assuming instantiation with campaign provided - - # List all tags stored in the provided Action Builder campaign - all_tags = bldr.get_campaign_tags() - - # Add a new tag value to the options available for the the field - bldr.insert_new_tag( - tag_name = 'Mom's Phone', # This is new - tag_field = 'Favorite Toy', # This would already exist, created in the UI - tag_section = 'Preferences' # This would already exist, created in the UI - ) - - # Add a person record to the provided Action Builder campaign - bldr.upsert_entity( - entity_type = 'Person', - data = {"person": {"given_name": "Rory"}} - ) - - # Connect two records and apply some tags to the Connection - tag_data = { # All of the values below must already have been created - "action_builder:name": "Friend of the Family", - "action_builder:field": "Relationship", - "action_builder:section": "People to People Info" - } - - bldr.upsert_connection( - ["entity-interact-id-1", "entity-interact-id-2"], # Any two entity IDs - tag_data = tag_data - ) - -*** -API -*** -.. 
autoclass :: parsons.ActionBuilder - :inherited-members: diff --git a/docs/action_kit.rst b/docs/action_kit.rst index 5946f80af4..fca5e75fa4 100644 --- a/docs/action_kit.rst +++ b/docs/action_kit.rst @@ -50,7 +50,7 @@ You can then call various endpoints: # Update user fields ak.update_user(user_id='123', city='New York') - # Delete user + # Delete user ak.delete_user(user_id='123') *** API *** diff --git a/docs/action_network.rst b/docs/action_network.rst index 2eec27dea3..da7c2b9d8f 100644 --- a/docs/action_network.rst +++ b/docs/action_network.rst @@ -6,11 +6,11 @@ Overview ******** `Action Network <https://actionnetwork.org/>`_ is an online tool for storing information -and organizing volunteers and donors. It is used primarily for digital organizing and event management. For more information, see `Action Network developer docs <https://actionnetwork.org/docs>`_ +about and organizing volunteers and donors. It is used primarily for digital organizing and event management. For more information, see `Action Network developer docs <https://actionnetwork.org/docs>`_ .. note:: Authentication - Only ActionNetwork accounts of the partner tier are able to access their API. You can generate your key from the API & Sync page, located in the *Start Organizing* menu, under *Details*. + Only ActionNetwork accounts of the partner tier are able to access their API. You can generate your key from the API & Sync page, located in the *Start Organizing* menu, in the right column. *********** Quick Start *********** @@ -33,7 +33,7 @@ You can then call various endpoints: .. code-block:: python # List all people stored in Action Network - all_contacts = an.get_people() + all_contacts = an.get_people_list() # Add a person an.add_person('person.email@fakeemail.com') diff --git a/docs/airtable.rst b/docs/airtable.rst index 5046ff7770..e46080e5e8 100644 --- a/docs/airtable.rst +++ b/docs/airtable.rst @@ -5,10 +5,10 @@ Airtable Overview ******** -The Airtable class allows you to interact with an `Airtable <https://airtable.com/>`_ base. In order to use this class +The Airtable class allows you to interact with an `Airtable <https://airtable.com/>`_ base. In order to use this class you must generate an Airtable API Key which can be found in your Airtable `account settings <https://airtable.com/account>`_. -.. note:: +.. note:: Finding The Base Key The easiest place to find the ``base_key`` for the base that you wish to interact with is via the Airtable API documentation. * Go to the `Airtable API Base List <https://airtable.com/api>`_ and select the base. diff --git a/docs/auth0.rst b/docs/auth0.rst deleted file mode 100644 index 800ca36995..0000000000 --- a/docs/auth0.rst +++ /dev/null @@ -1,38 +0,0 @@ -Auth0 -========= - -******** -Overview -******** - -`Auth0 <https://auth0.com>`_ is an authentication and authorization platform. This Parsons integration with the `Auth0 Management API <https://auth0.com/docs/api/management/v2>`_ supports fetching and deleting user records. - -********** -Quickstart -********** - -To instantiate the Auth0 class, you can either store your Auth0 API client ID, client secret, and domain as environment variables (``AUTH0_CLIENT_ID``, ``AUTH0_CLIENT_SECRET``, and ``AUTH0_DOMAIN``, respectively) or pass in your client ID, client secret, and domain as arguments: - -.. code-block:: python - - from parsons import Auth0 - - # First approach: Use API credentials via environmental variables - auth0 = Auth0() - - # Second approach: Pass API credentials as arguments - auth0 = Auth0('auth0_client_id', 'auth0_client_secret', 'auth0_domain') - -You can then call various endpoints: - -.. code-block:: python - - # Fetch user by email - user = auth0.get_users_by_email('fakeemail@fakedomain.com') - -*** -API -*** - -.. 
autoclass :: parsons.Auth0 - :inherited-members: diff --git a/docs/bluelink.rst b/docs/bluelink.rst index 985540a62a..459eef7ab3 100644 --- a/docs/bluelink.rst +++ b/docs/bluelink.rst @@ -5,13 +5,22 @@ Bluelink Overview ******** -`Bluelink `_ is an online tool for connecting various `digital software tools `_ used by campaigns and movement groups in the political and non-profit space so you can sync data between them. This integration currently supports sending your structured person data and related tags to Bluelink via the `Bluelink Webhook API `_, after which you can use Bluelink's UI to send to any of their `supported tools `_. If you don't see a tool you would like to connect to, please reach out at hello@bluelink.org to ask them to add it. +`Bluelink `_ is an online tool for connecting the various `digital software tools `_ +used by campaigns and movement groups in the political and non-profit space to allow you to seamlessly and easily sync data between them. +This integration currently supports sending your structured person data and related tags to Bluelink via the +`Bluelink Webhook API `_, after which you can use our UI to send to any of our +`supported tools `_. If you don't see a tool you would like to connect to, please reach out at +hello@bluelink.org to ask us to add it. + + .. note:: Authentication - If you don't have a Bluelink account please complete the `form `_ on the Bluelink website or email them at hello@bluelink.org. To get connection credentials select `Bluelink Webhook `_ from the apps menu. If you don't see this option, you may need to ask an account administrator to do this step for you. - - The credentials are automatically embedded into a one time secret link in case they need to be sent to you. Open the link to access the user and password. + If you don't have a Bluelink account please complete the `form `_ on our website or email us at hello@bluelink.org. + To get connection credentials select or ask an account administrator to select `Bluelink Webhook `_ + from the apps menu. The credentials are automatically embedded into a one time secret link in case they need to be sent to you. + Open the link to access the user and password, that you will then either pass directly to the Bluelink connector as arguments, + or set them as environment variables. ========== Quickstart diff --git a/docs/build_a_connector.rst b/docs/build_a_connector.rst index f70436833b..8873456f93 100644 --- a/docs/build_a_connector.rst +++ b/docs/build_a_connector.rst @@ -2,4 +2,314 @@ How to Build a Connector ======================== -The "building a new connector guide" has been moved to the Parsons website! You can find it `here `_. +Connector classes are at the heart of the Parsons project. When we want to add a new service for users to connect to with Parsons, we build a new Connector class for that service. + +The documentation contains `a complete list `_ of existing connectors. Requests for new connectors are made and discussed in `our issue tracker `_. Before starting to build a new connector, check to see if there’s any discussion about it in the tracker. Ideally, you’ll have a good sense of what you and/or other users want the connector to do before you start trying to build it. Remember, you can always reach out to the community and ask for advice! + +When you’re ready to get started, make sure you have Parsons installed and that the tests run successfully. 
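A quick way to confirm that everything is in working order (a sketch, run from the root of your Parsons checkout; the contributing guide covers setup in more detail):

.. code-block:: bash

   pip install -r requirements.txt
   pytest -rf test/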
+ +--------------- +Getting Started +--------------- + +The first thing you’ll need to do is create a new folder for your connector. This folder should have the same name as the module (file) within the folder, and the same name as the connector class. For example, the airtable connector is in the “airtable” folder, and the hustle connector is in the “hustle” folder. + +Inside the folder, create two files. The first should be named __init__.py and should be empty. The second will have the same name as your folder - this is the file which will have your connector’s code. For example, in the airtable folder this file is called airtable.py and in the hustle folder it’s called hustle.py. + +The directory should look like this: + +.. code-block:: python + + yourconnectorname/ + __init__.py + yourconnectorname.py + +Next, add the reference to your connector to `parsons/__init__.py`. Specifically, open `parsons/__init__.py`, scroll to the end of the other imports, and add the following: + +.. code-block:: python + + from parsons.yourconnectorname.yourconnectorname import yourconnectorname + +Also, in `parsons/__init__.py` add 'yourconnectorname' to the end of the list `__all__`. + +Once this is done, open the yourconnectorname.py file. At the top of the file, add the following code to enable logging for our connector: + +.. code-block:: python + + import logging + + + logger = logging.getLogger(__name__) + +You’ll also want to create the Connector class itself: + +.. code-block:: python + + class YourConnectorName(object): + """ + Instantiate class. + + `Args:` + """ + + def __init__(self, api_key=None): + pass + +The text enclosed in triple quotes (""" """) is called a docstring, and is used to provide information about the class. Typically, it includes the arguments accepted by the __init__ method of the class. + +The __init__ method defines how the class is instantiated. For instance, if you want to get an instance of the Connector class by writing `connector = YourConnectorName(table_name, api_key)` you’d have to add a table_name argument to go with the api_key argument. Your connector’s init statement will probably require a different set of arguments than we’ve written here, but this makes for a good start. + +In our Parsons connector classes, the __init__ method should handle authentication. That is, when we initialize our Connector, we should give it credentials so that it can connect to the third-party service. Then we won’t have to worry about authenticating in the other methods. How exactly you authenticate to the service will depend on the service, but it typically involves getting an api_key or access_token, and it almost always involves creating an account on the service. + +(Users of your connector class will need to know how to authenticate too! Take note of where you signed up for an account and how you got the api key, access token, etc., so you can include it in the documentation for your connector.) + +We like to give users two different options for getting api keys and other authentication to the connector - passing them as arguments to the __init__ method, and storing them as environmental variables. Use the Parsons utility `check_env` to allow for either possibility with code that looks like this: + +.. code-block:: python + + import logging + from parsons.utilities import check_env + + logger = logging.getLogger(__name__) + + + class YourConnectorName(object): + """ + Instantiate class. 
+ + `Args:` + """ + + def __init__(self, api_key=None): + self.api_key = check_env.check('YOURCONNECTORNAME_API_KEY', api_key) + +This code looks in the environmental variables for the api key and, if it doesn’t find it, uses the api_key passed in. + +Most connectors make extensive use of existing clients/providers. Most likely, your next step will be to instantiate one of those existing clients using the authentication data, and add it to the class. You can see an example of this in the `Airtable Connector `_. + +-------- +Patterns +-------- + +Parsons has a number of patterns that should be used when developing a connector to ensure that connectors look alike, which makes them easier to use and modify. Not all patterns apply to all connectors, but when reviewing pull requests, the maintainers will be looking to see if you adhere to the patterns described in this document. + +In the sections below, we will attempt to enumerate the established patterns. We will use the `parsons.mailchimp.mailchimp.Mailchimp` connector as an example of how to implement the patterns. + +^^^^^^^^^^^^^^^^^^^^ +Class initialization +^^^^^^^^^^^^^^^^^^^^ + +**Allow configuration of a connector with environment variables as well as arguments passed to the class initializer.** Make use of the `parsons.utilities.check_env.check` function to check that the value was provided either as an argument to the initializer, or in the environment. + +**When calling into a web API, use the `parsons.utilities.APIConnector` class.** The `APIConnector` class has a number of methods for making web requests, and using the `APIConnector` helps enforce consistency across connectors. The `APIConnector` is a wrapper around the Python `requests` library. + + +Mailchimp example: + +.. code-block:: python + + import re + + from parsons.utilities import check_env + from parsons.utilities.api_connector import APIConnector + + + class Mailchimp(): + """ + Instantiate Mailchimp Class + + `Args:` + api_key: + The Mailchimp-provided application key. Not required if + ``MAILCHIMP_API_KEY`` env variable set. + `Returns:` + Mailchimp Class + """ + + def __init__(self, api_key=None): + self.api_key = check_env.check('MAILCHIMP_API_KEY', api_key) + self.domain = re.findall("(?<=-).+$", self.api_key)[0] + self.uri = f'https://{self.domain}.api.mailchimp.com/3.0/' + self.client = APIConnector(self.uri, auth=('x', self.api_key)) + +In the `__init__` method above, the Mailchimp class takes one argument: `api_key`. The argument has a default value of `None`, which allows for a user to initialize the connector without any arguments (i.e. `Mailchimp()`). If no value is passed for `api_key` as an argument to the `__init__` method, then the `check_env.check` function will attempt to retrieve the value from the `MAILCHIMP_API_KEY` environment variable. If the value is neither passed in as argument nor in the environment, the `check_env.check` method will raise a `KeyError` exception. + +In the last line of the code snippet above, the `Mailchimp` class creates an `APIConnector` instance, providing the root URL for the API (`self.uri`). The Mailchimp API accepts basic authentication as an authentication mechanism, so the `Mailchimp` connector is able to pass the `api_key` to the `APIConnector` via the `auth` keyword argument. If the API for your connector does not support basic authentication, you may need to implement your own authentication (e.g. via request headers). 
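For example, a hypothetical service that expects a bearer token in a request header could be wired up like the following sketch (the class name, environment variable, URL, and header scheme are all illustrative assumptions, not a real Parsons connector; the headers are attached via `APIConnector`'s optional `headers` argument):

.. code-block:: python

    from parsons.utilities import check_env
    from parsons.utilities.api_connector import APIConnector


    class ExampleConnector():

        def __init__(self, api_key=None):
            self.api_key = check_env.check('EXAMPLE_API_KEY', api_key)
            self.uri = 'https://api.example.com/v1/'
            # Send the token in a request header instead of using basic auth
            headers = {'Authorization': f'Bearer {self.api_key}'}
            self.client = APIConnector(self.uri, headers=headers)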
+ +^^^^^^^^^^^^^^^^^^^^^^^^ +Your connector’s methods +^^^^^^^^^^^^^^^^^^^^^^^^ + +**The methods of your connector should generally mirror the endpoints of the API.** Every API is different, but the connector should generally look like the API it is connecting to. Methods of your connector should reference the resources the API is using (e.g. “people”, “members”, “events”). + +The following lists rules for naming common endpoints: + +* GET - single record - *get_<resource>* (e.g. get_event, get_person) +* GET - multiple records - *get_<resource>s* (e.g. get_members, get_people) +* POST - single record - *create_<resource>* (e.g. create_person, create_tag) +* PUT - single record - *update_<resource>* (e.g. update_person, update_event) +* DELETE - single record - *delete_<resource>* (e.g. delete_member) + +**A method’s arguments should mirror the parameters of the API endpoint it is calling.** Optional parameters should be optional in your method signature (i.e. default to `None`). + +**Use Python docstrings to document every public method of your class.** The docstrings for your public methods are used to automatically generate documentation for your connector. Having this documentation for every method makes it easier for users to pick up your connector. + +**Methods returning multiple values should return a Parsons Table.** If the list of results is empty, return an empty Parsons `Table` (not `None`). Methods returning a single value should just return the value. If the API could not find the value (e.g., the ID provided for a resource was not found), return a `None` value from the method. + +Mailchimp example: + +.. code-block:: python + + class Mailchimp(): + + def get_lists(self, fields=None, exclude_fields=None, + count=None, offset=None, before_date_created=None, + since_date_created=None, before_campaign_last_sent=None, + since_campaign_last_sent=None, email=None, sort_field=None, + sort_dir=None): + """ + Get a table of lists under the account based on query parameters. Note + that argument descriptions here are sourced from Mailchimp's official + API documentation. + + `Args:` + fields: list of strings + A comma-separated list of fields to return. Reference + parameters of sub-objects with dot notation. + exclude_fields: list of strings + A comma-separated list of fields to exclude. Reference + parameters of sub-objects with dot notation. + count: int + The number of records to return. Default value is 10. Maximum + value is 1000. + offset: int + The number of records from a collection to skip. Iterating over + large collections with this parameter can be slow. Default + value is 0. + before_date_created: string + Restrict response to lists created before the set date. We + recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00. + since_date_created: string + Restrict results to lists created after the set date. We + recommend ISO 8601 time format: 2015-10-21T15:41:36+00:00. + before_campaign_last_sent: string + Restrict results to lists created before the last campaign send + date. We recommend ISO 8601 time format: + 2015-10-21T15:41:36+00:00. + since_campaign_last_sent: string + Restrict results to lists created after the last campaign send + date. We recommend ISO 8601 time format: + 2015-10-21T15:41:36+00:00. + email: string + Restrict results to lists that include a specific subscriber's + email address. + sort_field: string, can only be 'date_created' or None + Returns files sorted by the specified field. + sort_dir: string, can only be 'ASC', 'DESC', or None + Determines the order direction for sorted results. 
+ + `Returns:` + Table Class + """ + params = {'fields': fields, + 'exclude_fields': exclude_fields, + 'count': count, + 'offset': offset, + 'before_date_created': before_date_created, + 'since_date_created': since_date_created, + 'before_campaign_last_sent': before_campaign_last_sent, + 'since_campaign_last_sent': since_campaign_last_sent, + 'email': email, + 'sort_field': sort_field, + 'sort_dir': sort_dir} + + response = self.client.get_request('lists', params=params) + tbl = Table(response['lists']) + logger.info(f'Found {tbl.num_rows} lists.') + if tbl.num_rows > 0: + return tbl + else: + return Table() + + +The `get_lists` method corresponds to the `GET /lists `_ endpoint on the Mailchimp API. The method has a number of arguments (all optional), all of which are described in the docstring. The arguments are then mapped to the names of the endpoint’s parameters, and passed to the `APIConnector`’s `get_request` method. + +The method can return more than one record, so the results of the call to the API are wrapped in a Parsons `Table`. If there are no results from the call, an empty table is returned. + +------------ +Finishing up +------------ + +^^^^^^^^^^^^^^^ +Testing locally +^^^^^^^^^^^^^^^ + +In order to test locally, you will need to install the version of Parsons that you have been working on. To do that, you will need to install in "editable" mode, which allows you to import your local Parsons code instead of the released code. + +To install Parsons in "editable" mode, run the following, where `<path>` is the path to the root of the Parsons repository on your local machine. + +```bash +pip install -e <path> +``` + +^^^^^^^^^^^^^^^^^^^^^^ +Adding automated tests +^^^^^^^^^^^^^^^^^^^^^^ + + * Add a folder *test_yourconnectorname* in parsons/test for your connector + * Add a file *test_yourconnectorname.py* to the *test_yourconnectorname* folder + * Use the code below as a starting point for your tests + * Add one `“Happy Path” <https://en.wikipedia.org/wiki/Happy_path>`_ test per public method of your connector + * When possible mock out any external integrations, otherwise mark your test using the ``unittest.skipIf`` decorator (for an example, see test/test_s3.py) + + For a more detailed guide on writing unit tests, see :doc:`How to Write Tests for Parsons Connectors ` + +.. code-block:: python + + from parsons.yourconnector.yourconnector import YourConnector + import unittest + import requests_mock + + class TestYourConnector(unittest.TestCase): + + def setUp(self): + + # add any setup code here to run before each test + pass + + def tearDown(self): + + # add any teardown code here to run after each test + pass + + @requests_mock.Mocker() + def test_get_things(self, m): + + # Test that things are returned correctly. + m.get('http://yourconnector.com/v1/things', json=[]) + yc = YourConnector() + tbl = yc.get_things() + + self.assertEqual(tbl.num_rows, 0) + +^^^^^^^^^^^^^^^^^^^^ +Adding documentation +^^^^^^^^^^^^^^^^^^^^ + + * Add *yourconnectorname.rst* to the parsons/docs folder. + * Use the parsons/docs/_template.rst file as a guide for the documentation for your connector. 
+ * Add a reference to your connector’s doc file to the parsons/docs/index.rst + * You just need to add the filename without the .rst extension (ie *yourconnector*) + * Be sure to add *yourconnector* in alphabetical order + +^^^^^^^^^^^ +Final steps +^^^^^^^^^^^ + + * Add any new dependencies to the parsons/requirements.txt file + * Run the entire suite of Parsons unit tests using the `pytest -rf test` command + * Run the linter against Parsons using `flake8 --max-line-length=100 parsons` + * Double-check that you have committed all of your code changes to your branch, and that you have pushed your branch to your fork + * Open a pull request against the move-coop/parsons repository diff --git a/docs/conf.py b/docs/conf.py index 84de54eaca..2bdf0ecd77 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -14,21 +14,20 @@ # import os import sys - # import parsons # sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath("../")) +sys.path.insert(0, os.path.abspath('../')) # -- Project information ----------------------------------------------------- -project = "Parsons" -copyright = "2019, The Movement Cooperative" -author = "The Movement Cooperative" +project = u'Parsons' +copyright = u'2019, The Movement Cooperative' +author = u'The Movement Cooperative' # The short X.Y version -version = "" +version = u'' # The full version, including alpha/beta/rc tags -release = "" +release = u'0.5' # -- General configuration --------------------------------------------------- @@ -41,27 +40,26 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.coverage", - "sphinx.ext.imgmath", - "sphinx.ext.viewcode", - "myst_parser", - "sphinx_multiversion", + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.imgmath', + 'sphinx.ext.viewcode', + 'myst_parser' ] # Sorting of attributes -autodoc_member_order = "bysource" +autodoc_member_order = 'bysource' # Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] +templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # -source_suffix = [".rst", ".md"] +source_suffix = ['.rst', '.md'] # The master toctree document. -master_doc = "index" +master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -73,10 +71,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "_template.rst"] +exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store', '_template.rst'] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" +pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- @@ -84,21 +82,19 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = "sphinx_rtd_theme" +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # -html_theme_options = { - "display_version": True, -} +# html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. @@ -108,20 +104,13 @@ # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # -html_sidebars = {"**": ["versions.html"]} - -try: - html_context -except NameError: - html_context = dict() - -html_context["display_versions_lower_left"] = True +# html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = "Parsonsdoc" +htmlhelp_basename = 'Parsonsdoc' # -- Options for LaTeX output ------------------------------------------------ @@ -130,12 +119,15 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. # # 'preamble': '', + # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -145,13 +137,8 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - ( - master_doc, - "Parsons.tex", - "Parsons Documentation", - "The Movement Cooperative", - "manual", - ), + (master_doc, 'Parsons.tex', u'Parsons Documentation', + u'The Movement Cooperative', 'manual'), ] @@ -159,7 +146,10 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "parsons", "Parsons Documentation", [author], 1)] +man_pages = [ + (master_doc, 'parsons', u'Parsons Documentation', + [author], 1) +] # -- Options for Texinfo output ---------------------------------------------- @@ -168,28 +158,10 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ( - master_doc, - "Parsons", - "Parsons Documentation", - author, - "Parsons", - "One line description of project.", - "Miscellaneous", - ), + (master_doc, 'Parsons', u'Parsons Documentation', + author, 'Parsons', 'One line description of project.', + 'Miscellaneous'), ] # -- Extension configuration ------------------------------------------------- - -# sphinx-multiversion - -DOCUMENTED_VERSIONS = ["v0.18.1", "v0.18.0", "v0.17.0", "v0.16.0", "v0.15.0", "v0.14.0"] - -# Whitelist pattern for branches -smv_branch_whitelist = ( - r"^stable|latest$" # creates version for latest master/main branch -) - -# Get tags to whitelist from DOCUMENTED_VERSIONS const -smv_tag_whitelist = "|".join(["^" + version + "$" for version in DOCUMENTED_VERSIONS]) diff --git a/docs/contributing.rst b/docs/contributing.rst index a299f56f74..a43a1f06d3 100644 --- a/docs/contributing.rst +++ b/docs/contributing.rst @@ -2,5 +2,5 @@ Contributing to Parsons ======================= - -The contributing guide has been moved to the Parsons website! You can find it `here `_. +.. include:: ../CONTRIBUTING.md + :parser: myst_parser.sphinx_ \ No newline at end of file diff --git a/docs/donorbox.rst b/docs/donorbox.rst deleted file mode 100644 index 4107a8aba2..0000000000 --- a/docs/donorbox.rst +++ /dev/null @@ -1,70 +0,0 @@ -Donorbox -======== - -******** -Overview -******** - -`Donorbox `_ is an online donation platform through which donors can make one-off or -recurring donations. 
This Parsons class provides methods for extracting donors, campaigns, donations, and plans. - -The documentation for the underlying Donorbox API can be found `here `_. - -.. note:: - To authenticate, go to your account on donorbox.org and select the "API & Zapier Integration" option - under Add-ons. Enable the add-on. (Note that currently Donorbox charges to enable this feature.) - Once the add-on is enabled, hit the "Set new API key" button and copy the generated key. - -********** -Quickstart -********** - -To instantiate the Donorbox class, you can either store the Donorbox accont email and -API key as environmental variables (``DONORBOX_ACCOUNT_EMAIL``, ``DONORBOX_API_KEY``) -or pass them in as arguments: - -.. code-block:: python - - from parsons import Donorbox - - # First approach: Use API key and account email via environmental variables - donorbox = Donorbox() - - # Second approach: Pass API credentials and user email as arguments - donorbox = Donorbox(email='me@myorg.com', api_key='MYAPIKEY') - -You can then call various endpoints: - -.. code-block:: python - - # Get campaigns - - campaigns = donorbox.get_campaigns() # get all campaigns - campaigns = donorbox.get_campaigns(name="My campaign") # get campaigns by name - campaigns = donorbox.get_campaigns(name="My campaign", order="desc") # results in descending order - - # Get donations - - donations = donorbox.get_donations() # get all donations - donations = donorbox.get_donations(date_to="2022-10-22") # get all donations before date - donations = donorbox.get_donations(campaign_name="My campaign") # filter donations by campaign - - # Get donors - - donors = donorbox.get_donors() # get all donors - donors = donorbox.get_donors(email="example@example.org") # get donors by email - donors = donorbox.get_donors(page=1, per_page=10) # use pagination - - # Get plans - - plans = donorbox.get_plans() # get all plans - plans = donorbox.get_plans(date_from="2022-10-22") # get plans started after date - - -*** -API -*** - -.. autoclass :: parsons.Donorbox - :inherited-members: - diff --git a/docs/google.rst b/docs/google.rst index 84b4060934..3f0d6c4a0a 100644 --- a/docs/google.rst +++ b/docs/google.rst @@ -1,57 +1,12 @@ Google ====== -Google Cloud services allow you to upload and manipulate Tables as spreadsheets (via GoogleSheets) or query them as SQL database tables (via GoogleBigQuery). You can also upload/store/download them as binary objects (via GoogleCloudStorage). Google also offers an API for civic information using GoogleCivic and admin information using the Google Workspace Admin SDK. +Google Cloud services allow you to upload and manipulate Tables as spreadsheets (via GoogleSheets) or query them as SQL database tables (via GoogleBigQuery). You can also upload/store/download them as binary objects (via GoogleCloudStorage). Finally, Google offers an API for civic information using GoogleCivic. -For all of these services you will need to enable the APIs for your Google Cloud account and obtain authentication tokens or other credentials to access them from your scripts. If you are the administrator of your Google Cloud account, you can do both of these at `Google Cloud Console APIs and Services `_. The connectors below have more specific information about how to authenticate. +For all of these services you will need to enable the APIs for your Google Cloud account and obtain authentication tokens to access them from your scripts. 
If you are the administrator of your Google Cloud account, you can do both of these at `Google Cloud Console APIs and Services `_. .. _gbq: -************* -Google Admin -************* - -======== -Overview -======== - -The GoogleAdmin class allows you to get information about groups and members in Google Admin. - -In order to instantiate the class, you must pass Google service account credentials as a dictionary, or store the credentials as a JSON string in the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable. You must also provide an email address for `domain-wide delegation `_. - -========== -Quickstart -========== - -To instantiate the GoogleAdmin class, you can either pass the constructor a dict containing your Google service account credentials or define the environment variable ``GOOGLE_APPLICATION_CREDENTIALS`` to contain a JSON encoding of the dict. - -.. code-block:: python - - from parsons import GoogleAdmin - - # First approach: Use API credentials via environmental variables - admin = GoogleAdmin(None, 'fakeemail@fakedomain.com') - - # Second approach: Pass API credentials as argument - credential_filename = 'google_application_credentials.json' - credentials = json.load(open(credential_filename)) - sheets = GoogleSheets(credentials, 'fakeemail@fakedomain.com') - -You can then get information about groups and members using instance methods: - -.. code-block:: python - - members = admin.get_all_group_members('group_key') - groups = admin.get_all_groups(domain='fakedomain.com') - -=== -API -=== - -.. autoclass:: parsons.google.google_admin.GoogleAdmin - :inherited-members: - - ******** BigQuery ******** @@ -252,21 +207,13 @@ Overview The GoogleSheets class allows you to interact with Google service account spreadsheets, called "Google Sheets." You can create, modify, read, format, share and delete sheets with this connector. -In order to instantiate the class, you must pass Google service account credentials as a dictionary, or store the credentials as a JSON file locally and pass the path to the file as a string in the ``GOOGLE_DRIVE_CREDENTIALS`` environment variable. You can follow these steps: - -- Go to the `Google Developer Console `_ and make sure the "Google Drive API" and the "Google Sheets API" are both enabled. -- Go to the credentials page via the lefthand sidebar. On the credentials page, click "create credentials". -- Choose the "Service Account" option and fill out the form provided. This should generate your credentials. -- Select your newly created Service Account on the credentials main page. -- select "keys", then "add key", then "create new key". Pick the key type JSON. The credentials should start to automatically download. - -You can now copy and paste the data from the key into your script or (recommended) save it locally as a JSON file. +In order to instantiate the class, you must pass Google service account credentials as a dictionary, or store the credentials as a JSON string in the ``GOOGLE_DRIVE_CREDENTIALS`` environment variable. Typically you'll get the credentials from the Google Developer Console (look for the "Google Drive API"). ========== Quickstart ========== -To instantiate the GoogleSheets class, you can either pass the constructor a dict containing your Google service account credentials or define the environment variable ``GOOGLE_DRIVE_CREDENTIALS`` to contain a path to the JSON file containing the dict. 
+To instantiate the GoogleSheets class, you can either pass the constructor a dict containing your Google service account credentials or define the environment variable ``GOOGLE_DRIVE_CREDENTIALS`` to contain a JSON encoding of the dict. .. code-block:: python @@ -288,8 +235,6 @@ You can then create/modify/retrieve documents using instance methods: sheets.append_to_sheet(sheet_id, people_with_cell_phones) parsons_table = sheets.get_worksheet(sheet_id) -You may also want to share the document with your service or user account. - === API === diff --git a/docs/html/_images/favicon.ico b/docs/html/_images/favicon.ico new file mode 100644 index 0000000000..bb439c0fcb Binary files /dev/null and b/docs/html/_images/favicon.ico differ diff --git a/docs/html/_images/parsons_diagram.png b/docs/html/_images/parsons_diagram.png new file mode 100644 index 0000000000..bfdb7760d8 Binary files /dev/null and b/docs/html/_images/parsons_diagram.png differ diff --git a/docs/html/_modules/index.html b/docs/html/_modules/index.html new file mode 100644 index 0000000000..507f2717bd --- /dev/null +++ b/docs/html/_modules/index.html @@ -0,0 +1,310 @@ + + + + + + + + + + + Overview: module code — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
All modules for which code is available
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/action_kit/action_kit.html b/docs/html/_modules/parsons/action_kit/action_kit.html
new file mode 100644
index 0000000000..9a418bb66f
--- /dev/null
+++ b/docs/html/_modules/parsons/action_kit/action_kit.html
@@ -0,0 +1,949 @@
+parsons.action_kit.action_kit — Parsons 0.5 documentation
Source code for parsons.action_kit.action_kit

+import json
+import logging
+import requests
+import time
+
+from parsons.etl.table import Table
+from parsons.utilities import check_env
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class ActionKit(object): + """ + Instantiate the ActionKit class + + `Args:` + domain: str + The ActionKit domain (e.g. ``myorg.actionkit.com``) Not required if + ``ACTION_KIT_DOMAIN`` env variable set. + username: str + The authorized ActionKit username. Not required if ``ACTION_KIT_USERNAME`` env + variable set. + password: str + The authorized ActionKit user password. Not required if ``ACTION_KIT_PASSWORD`` + env variable set. + """ + + _default_headers = {'content-type': 'application/json', + 'accepts': 'application/json'} + + def __init__(self, domain=None, username=None, password=None): + + self.domain = check_env.check('ACTION_KIT_DOMAIN', domain) + self.username = check_env.check('ACTION_KIT_USERNAME', username) + self.password = check_env.check('ACTION_KIT_PASSWORD', password) + self.conn = self._conn() + + def _conn(self, default_headers=_default_headers): + + client = requests.Session() + client.auth = (self.username, self.password) + client.headers.update(default_headers) + return client + + def _base_endpoint(self, endpoint, entity_id=None): + # Create the base endpoint URL + + url = f'https://{self.domain}/rest/v1/{endpoint}/' + + if entity_id: + return url + f'{entity_id}/' + return url + + def _base_get(self, endpoint, entity_id=None, exception_message=None, params=None): + # Make a general get request to ActionKit + + resp = self.conn.get(self._base_endpoint(endpoint, entity_id), params=params) + if exception_message and resp.status_code == 404: + raise Exception(self.parse_error(resp, exception_message)) + + return resp.json() + + def _base_post(self, endpoint, exception_message, return_full_json=False, **kwargs): + # Make a general post request to ActionKit + + resp = self.conn.post(self._base_endpoint(endpoint), data=json.dumps(kwargs)) + + if resp.status_code != 201: + raise Exception(self.parse_error(resp, exception_message)) + + # Some of the methods should just return pointer to location of created + # object. + if 'headers' in resp.__dict__ and not return_full_json: + return resp.__dict__['headers']['Location'] + + # Not all responses return a json + try: + return resp.json() + + except ValueError: + return None + + def parse_error(self, resp, exception_message): + # AK provides some pretty robust/helpful error reporting. We should surface them with + # our exceptions. + + if 'errors' in resp.json().keys(): + if isinstance(resp.json()['errors'], list): + exception_message += '\n' + ','.join(resp.json()['errors']) + else: + for k, v in resp.json()['errors'].items(): + exception_message += str('\n' + k + ': ' + ','.join(v)) + + return exception_message + +
[docs] def get_user(self, user_id): + """ + Get a user. + + `Args:` + user_id: int + The user id of the record to get. + `Returns`: + User json object + """ + + return self._base_get(endpoint='user', entity_id=user_id, + exception_message='User not found')
+ +
[docs] def get_user_fields(self): + """ + Get list of valid user fields that can be passed with the + :meth:`ActionKit.create_user` method. + + `Returns`: + List of user fields + """ + + resp = self._base_get(endpoint='user/schema') + + return list(resp['fields'].keys())
+ +
[docs] def create_user(self, email, **kwargs): + """ + Create a user. + + `Args:` + email: str + Email for the user + **kwargs: + Optional arguments and fields to pass to the client. A full list can be found + in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\ + manual/api/rest/actionprocessing.html>`_. + `Returns:` + User json object + """ + + return self._base_post(endpoint='user', exception_message='Could not create user', + email=email, **kwargs)
+ +
[docs] def update_user(self, user_id, **kwargs): + """ + Update a user. + + `Args:` + user_id: int + The user id of the person to update + **kwargs: + Optional arguments and fields to pass to the client. A full list can be found + in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\ + manual/api/rest/actionprocessing.html>`_. + `Returns:` + ``None`` + """ + + resp = self.conn.patch(self._base_endpoint('user', user_id), data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {user_id}')
+ +
[docs] def update_event(self, event_id, **kwargs): + """ + Update an event. + + `Args:` + event_id: int + The event id of the event to update + **kwargs: + Optional arguments and fields to pass to the client. A full list can be found + in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\ + manual/api/rest/actionprocessing.html>`_. + `Returns:` + ``None`` + """ + + resp = self.conn.patch(self._base_endpoint('event', event_id), data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {event_id}')
+ +
[docs] def delete_user(self, user_id): + """ + Delete a user. + + `Args:` + user_id: int + The user id of the person to delete + `Returns:` + ``None`` + """ + + resp = self.conn.delete(self._base_endpoint('user', user_id)) + logger.info(f'{resp.status_code}: {user_id}')
+ +
[docs] def get_campaign(self, campaign_id): + """ + Get a campaign. + + `Args:` + campaign_id: int + The campaign id of the record. + `Returns`: + Campaign json object + """ + + return self._base_get(endpoint='campaign', entity_id=campaign_id, + exception_message='Campaign not found')
+ +
[docs] def get_campaign_fields(self): + """ + Get list of valid campaign fields that can be passed with the + :meth:`ActionKit.create_campaign` and :meth:`ActionKit.update_campaign` methods. + + `Returns`: + List of campaign fields + """ + + resp = self._base_get(endpoint='campaign/schema') + return list(resp['fields'].keys())
+ +
[docs] def create_campaign(self, name, **kwargs): + """ + Create a campaign. + + `Args:` + name: str + The name of the campaign to create + **kwargs: + Optional arguments and fields to pass to the client. A full list can be found + in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\ + manual/api/rest/actionprocessing.html>`_. + `Returns`: + API location of new resource + """ + + return self._base_post(endpoint='campaign', exception_message='Could not create campaign', + name=name, **kwargs)
+ +
[docs] def get_event_create_page(self, event_create_page_id):
+        """
+        Get an event create page.
+
+        `Args:`
+            event_create_page_id: int
+                The event create page id of the record to get.
+        `Returns`:
+            Event create page json object
+        """
+
+        return self._base_get(endpoint='eventcreatepage', entity_id=event_create_page_id,
+                              exception_message='Event create page not found')
+ +
[docs] def get_event_create_page_fields(self): + """ + Get list of event create page fields that can be passed with the + :meth:`ActionKit.create_event_create_page`. + + `Returns`: + List of event create page fields + """ + + resp = self._base_get(endpoint='eventcreatepage/schema') + return list(resp['fields'].keys())
+ +
[docs] def create_event_create_page(self, name, campaign_id, title, **kwargs):
+        """
+        Add an event page to a campaign.
+
+        `Args:`
+            campaign_id: int
+                The campaign to associate the page with
+            name: str
+                The name of the page to create
+            title: str
+                The title of the page to create
+            **kwargs:
+                Optional arguments and fields to pass to the client. A full list can be found
+                in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\
+                manual/api/rest/actionprocessing.html>`_.
+        `Returns`:
+            API location of new resource
+        """
+
+        return self._base_post(endpoint='eventcreatepage',
+                               exception_message='Could not create event create page',
+                               campaign=f'/rest/v1/campaign/{campaign_id}/',
+                               name=name,
+                               title=title,
+                               **kwargs)
+ +
[docs] def get_event_create_form(self, event_create_form_id):
+        """
+        Get an event create form.
+
+        `Args:`
+            event_create_form_id: int
+                The event create form id of the record to get.
+        `Returns`:
+            Event create form json object
+        """
+
+        return self._base_get(endpoint='eventcreateform', entity_id=event_create_form_id,
+                              exception_message='Event create form not found')
+ +
[docs] def get_event_create_form_fields(self): + """ + Get list of valid event create form fields that can be passed with the + :meth:`ActionKit.create_event_create_form` method. + + `Returns`: + List of event create form fields + """ + + resp = self._base_get(endpoint='eventcreateform/schema') + return list(resp['fields'].keys())
+ +
[docs] def create_event_create_form(self, page_id, thank_you_text, **kwargs):
+        """
+        Create an event create form.
+
+        `Args:`
+            page_id: int
+                The page to associate the form with
+            thank_you_text: str
+                Free form thank you text
+            **kwargs:
+                Optional arguments and fields to pass to the client. A full list can be found
+                in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\
+                manual/api/rest/actionprocessing.html>`_.
+        `Returns:`
+            API location of new resource
+        """
+
+        return self._base_post(endpoint='eventcreateform',
+                               exception_message='Could not create event create form',
+                               page=f'/rest/v1/eventcreatepage/{page_id}/',
+                               thank_you_text=thank_you_text,
+                               **kwargs)
+ +
[docs] def get_event_signup_page(self, event_signup_page_id):
+        """
+        Get an event signup page.
+
+        `Args:`
+            event_signup_page_id: int
+                The event signup page id of the record to get.
+        `Returns`:
+            Event signup page json object
+        """
+
+        return self._base_get(endpoint='eventsignuppage', entity_id=event_signup_page_id,
+                              exception_message='Event signup page not found')
+ +
[docs] def get_event_signup_page_fields(self): + """ + Get list of valid event signup page fields that can be passed with the + :meth:`ActionKit.create_event_signup_page` method. + + `Returns`: + List of event signup page fields + """ + + resp = self._base_get(endpoint='eventsignuppage/schema') + return list(resp['fields'].keys())
+ +
[docs] def create_event_signup_page(self, name, campaign_id, title, **kwargs):
+        """
+        Add an event signup page to a campaign.
+
+        `Args:`
+            campaign_id: int
+                The campaign to associate the page with
+            name: str
+                The name of the page to create
+            title: str
+                The title of the page to create
+            **kwargs:
+                Optional arguments and fields to pass to the client. A full list can be found
+                in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\
+                manual/api/rest/actionprocessing.html>`_.
+        `Returns`:
+            API location of new resource
+        """
+
+        return self._base_post(endpoint='eventsignuppage',
+                               exception_message='Could not create signup page',
+                               campaign=f'/rest/v1/campaign/{campaign_id}/',
+                               name=name,
+                               title=title,
+                               **kwargs)
+ +
[docs] def get_event_signup_form(self, event_signup_form_id):
+        """
+        Get an event signup form.
+
+        `Args:`
+            event_signup_form_id: str
+                The event signup form id of the record to get.
+        `Returns`:
+            Event signup form json object
+        """
+
+        return self._base_get(endpoint='eventsignupform', entity_id=event_signup_form_id,
+                              exception_message='Event signup form not found')
+ +
[docs] def get_event_signup_form_fields(self): + """ + Get list of valid event signup form fields that can be passed with the + :meth:`ActionKit.create_event_signup_form` method. + + `Returns`: + List of event signup form fields + """ + + resp = self._base_get(endpoint='eventsignupform/schema') + return list(resp['fields'].keys())
+ +
[docs] def create_event_signup_form(self, page_id, thank_you_text, **kwargs):
+        """
+        Create an event signup form.
+
+        `Args:`
+            page_id: int
+                The page to associate the form with
+            thank_you_text: str
+                Free form thank you text
+            **kwargs:
+                Optional arguments and fields to pass to the client. A full list can be found
+                in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\
+                manual/api/rest/actionprocessing.html>`_.
+        `Returns:`
+            API location of new resource
+        """
+
+        return self._base_post(endpoint='eventsignupform',
+                               exception_message='Could not create event signup form',
+                               page=f'/rest/v1/page/{page_id}/',
+                               thank_you_text=thank_you_text,
+                               **kwargs)
+ +
[docs] def update_event_signup(self, event_signup_id, **kwargs):
+        """
+        Update an event signup.
+
+        `Args:`
+            event_signup_id: int
+                The id of the event signup to update
+            **kwargs:
+                Optional arguments and fields to pass to the client. A full list can be found
+                in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\
+                manual/api/rest/actionprocessing.html>`_.
+        `Returns:`
+            ``None``
+        """
+
+        resp = self.conn.patch(self._base_endpoint('eventsignup', event_signup_id),
+                               data=json.dumps(kwargs))
+        logger.info(f'{resp.status_code}: {event_signup_id}')
+ +
[docs] def get_page_followup(self, page_followup_id):
+        """
+        Get a page followup.
+
+        `Args:`
+            page_followup_id: int
+                The page followup id of the record to get.
+        `Returns`:
+            Page followup json object
+        """
+
+        return self._base_get(endpoint='pagefollowup', entity_id=page_followup_id,
+                              exception_message='Page followup not found')
+ +
[docs] def get_page_followup_fields(self): + """ + Get list of valid page followup fields that can be passed with the + :meth:`ActionKit.create_page_followup` method. + + `Returns`: + List of page followup fields + """ + + resp = self._base_get(endpoint='pagefollowup/schema') + return list(resp['fields'].keys())
+ +
[docs] def create_page_followup(self, signup_page_id, url, **kwargs):
+        """
+        Add a page followup.
+
+        `Args:`
+            signup_page_id: int
+                The signup page to associate the followup page with
+            url: str
+                URL of the followup page
+            **kwargs:
+                Optional arguments and fields to pass to the client. A full list can be found
+                in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\
+                manual/api/rest/actionprocessing.html>`_.
+        `Returns`:
+            API location of new resource
+        """
+
+        return self._base_post(endpoint='pagefollowup',
+                               exception_message='Could not create page followup',
+                               page=f'/rest/v1/eventsignuppage/{signup_page_id}/',
+                               url=url,
+                               **kwargs)
+ +
[docs] def create_generic_action(self, page, email=None, ak_id=None, **kwargs):
+        """
+        Post a generic action. One of ``ak_id`` or ``email`` is a required argument.
+
+        `Args:`
+            page:
+                The page to post the action. The page short name.
+            email:
+                The email address of the user to post the action.
+            ak_id:
+                The action kit id of the record.
+            **kwargs:
+                Optional arguments and fields to pass to the client. A full list can be found
+                in the `ActionKit API Documentation <https://roboticdogs.actionkit.com/docs/\
+                manual/api/rest/actionprocessing.html>`_.
+        `Returns`:
+            dict
+                The response json
+        """  # noqa: E501,E261
+
+        # At least one of the two identifiers must be provided.
+        if not (email or ak_id):
+            raise ValueError('One of email or ak_id is required.')
+
+        return self._base_post(endpoint='action',
+                               exception_message='Could not create action.',
+                               email=email,
+                               ak_id=ak_id,
+                               page=page,
+                               return_full_json=True,
+                               **kwargs)
+ +
[docs] def bulk_upload_csv(self, csv_file, import_page,
+                        autocreate_user_fields=False, user_fields_only=False):
+        """
+        Bulk upload a csv file of new users or user updates.
+        If you are uploading a table object, use bulk_upload_table instead.
+        See `ActionKit User Upload Documentation <https://roboticdogs.actionkit.com/docs/manual/api/rest/uploads.html>`_
+        Be careful that blank values in columns will overwrite existing data.
+
+        If you get a 500 error, try sending a much smaller file (say, one row),
+        which is more likely to return the proper 400 with a useful error message.
+
+        `Args:`
+            import_page: str
+                The page to post the action. The page short name.
+            csv_file: str or buffer
+                The csv (optionally zip'd) file path or a file buffer object
+                A user_id or email column is required.
+                ActionKit rejects files that are larger than 128M
+            autocreate_user_fields: bool
+                When True columns starting with "user_" will be uploaded as user fields.
+                See the `autocreate_user_fields documentation
+                <https://roboticdogs.actionkit.com/docs/manual/api/rest/uploads.html#create-a-multipart-post-request>`_.
+            user_fields_only: bool
+                When uploading only an email/user_id column and user_ user fields,
+                ActionKit has a fast processing path.
+                This fast path doesn't work if you upload a zipped csv, though.
+        `Returns`:
+            dict
+                success: whether upload was successful
+                progress_url: an API URL to get progress on upload processing
+                res: requests http response object
+        """  # noqa: E501,E261
+
+        # self.conn defaults to JSON, but this has to be form/multi-part....
+        upload_client = self._conn({'accepts': 'application/json'})
+        if isinstance(csv_file, str):
+            csv_file = open(csv_file, 'rb')
+
+        url = self._base_endpoint('upload')
+        files = {'upload': csv_file}
+        data = {
+            'page': import_page,
+            'autocreate_user_fields': int(autocreate_user_fields),
+            'user_fields_only': int(user_fields_only),
+        }
+        with upload_client.post(url, files=files, data=data) as res:
+            progress_url = res.headers.get('Location')
+            rv = {
+                'res': res,
+                'success': res.status_code == 201,
+                'id': progress_url.split('/')[-2] if progress_url else None,
+                'progress_url': progress_url
+            }
+            return rv
+ +
[docs] def bulk_upload_table(self, table, import_page, autocreate_user_fields=0, + no_overwrite_on_empty=False, set_only_columns=None): + """ + Bulk upload a table of new users or user updates. + See `ActionKit User Upload Documentation <https://roboticdogs.actionkit.com/docs/manual/api/rest/uploads.html>`_ + Be careful that blank values in columns will overwrite existing data. + + Tables with only an identifying column (user_id/email) and user_ user fields + will be fast-processed -- this is useful for setting/updating user fields. + + .. note:: + If you get a 500 error, try sending a much smaller file (say, one row), + which is more likely to return the proper 400 with a useful error message + + `Args:` + import_page: str + The page to post the action. The page short name. + table: Table Class + A Table of user data to bulk upload + A user_id or email column is required. + autocreate_user_fields: bool + When True columns starting with "user_" will be uploaded as user fields. + `ActionKit <https://actionkit.com/>`_. + See the autocreate_user_fields `documentation <https://roboticdogs.actionkit.com/docs/manual/api/rest/uploads.html#create-a-multipart-post-request>`_. + no_overwrite_on_empty: bool + When uploading user data, ActionKit will, by default, take a blank value + and overwrite existing data for that user. + This can be undesirable, if the goal is to only send updates. + Setting this to True will divide up the table into multiple upload + batches, changing the columns uploaded based on permutations of + empty columns. + set_only_columns: list + This is similar to no_overwrite_on_empty but restricts to a specific set of columns + which, if blank, should not be overwritten. + `Returns`: + dict + success: bool -- whether upload was successful (individual rows may not have been) + results: [dict] -- This is a list of the full results. + progress_url and res for any results + """ # noqa: E501,E261 + + import_page = check_env.check('ACTION_KIT_IMPORTPAGE', import_page) + upload_tables = self._split_tables_no_empties( + table, no_overwrite_on_empty, set_only_columns) + results = [] + for tbl in upload_tables: + user_fields_only = int(not any([ + h for h in tbl.columns + if h != 'email' and not h.startswith('user_')])) + results.append(self.bulk_upload_csv(tbl.to_csv(), + import_page, + autocreate_user_fields=autocreate_user_fields, + user_fields_only=user_fields_only)) + return { + 'success': all([r['success'] for r in results]), + 'results': results + }
+ + def _split_tables_no_empties(self, table, no_overwrite_on_empty, set_only_columns): + table_groups = {} + # uploading combo of user_id and email column should be mutually exclusive + blank_columns_test = table.columns + if not no_overwrite_on_empty: + blank_columns_test = (set(['user_id', 'email'] + (set_only_columns or [])) + .intersection(table.columns)) + for row in table: + blanks = tuple(k for k in blank_columns_test + if row.get(k) in (None, '')) + grp = table_groups.setdefault(blanks, []) + grp.append(row) + results = [] + for blanks, subset in table_groups.items(): + subset_table = Table(subset) + if blanks: + subset_table.table = subset_table.table.cutout(*blanks) + logger.debug(f'Column Upload Blanks: {blanks}') + logger.debug(f'Column Upload Columns: {subset_table.columns}') + if not set(['user_id', 'email']).intersection(subset_table.columns): + logger.warning( + f'Upload will fail without user_id or email. ' + f'Rows: {subset_table.num_rows}, Columns: {subset_table.columns}' + ) + results.append(subset_table) + return results + +
[docs] def collect_upload_errors(self, result_array): + """ + Collect any upload errors as a list of objects from bulk_upload_table 'results' key value. + This waits for uploads to complete, so it may take some time if you uploaded a large file. + `Args:` + result_array: list + After receiving a dict back from bulk_upload_table you may want to see if there + were any errors in the uploads. If you call collect_upload_errors(result_array) + it will iterate across each of the uploads fetching the final result of e.g. + /rest/v1/uploaderror?upload=123 + `Returns`: + [dict] + message: str -- error message + upload: str -- upload progress API path e.g. "/rest/v1/upload/123456/" + id: int -- upload error record id (different than upload id) + """ + errors = [] + for res in result_array: + upload_id = res.get('id') + if upload_id: + while True: + upload = self._base_get(endpoint='upload', entity_id=upload_id) + if not upload or upload.get('status') != 'new': + break + else: + time.sleep(1) + error_data = self._base_get(endpoint='uploaderror', params={'upload': upload_id}) + logger.debug(f'error collect result: {error_data}') + errors.extend(error_data.get('objects') or []) + return errors
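The three bulk upload methods above chain together: ``bulk_upload_table`` splits a Parsons Table into one or more csv uploads, each upload returns a progress record, and ``collect_upload_errors`` polls those records for row-level failures. A minimal sketch of that flow, assuming working ActionKit credentials and a hypothetical import page named ``my_import_page``:

.. code-block:: python

    from parsons import Table
    from parsons.action_kit.action_kit import ActionKit

    # Credentials can also come from the ACTION_KIT_* env variables
    ak = ActionKit(domain='myorg.actionkit.com', username='me', password='secret')

    tbl = Table([{'email': 'supporter@example.org', 'user_mobile': '5551234567'}])

    # user_* columns are uploaded as user fields when autocreate_user_fields is set
    result = ak.bulk_upload_table(tbl, 'my_import_page', autocreate_user_fields=1)

    if not result['success']:
        # Wait for the upload jobs to finish, then surface any row-level errors
        for error in ak.collect_upload_errors(result['results']):
            print(error['message'])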
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/aws/s3.html b/docs/html/_modules/parsons/aws/s3.html
new file mode 100644
index 0000000000..b060f7568b
--- /dev/null
+++ b/docs/html/_modules/parsons/aws/s3.html
@@ -0,0 +1,618 @@
+parsons.aws.s3 — Parsons 0.5 documentation

Source code for parsons.aws.s3

+import re
+import boto3
+from parsons.utilities import files
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class AWSConnection(object):
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None):
+
+        # Order of operations for searching for keys:
+        #   1. Look for keys passed as kwargs
+        #   2. Look for env variables
+        #   3. Look for aws config file
+        # Boto3 handles 2 & 3, but should handle 1 on its own. Not sure
+        # why that's not working.
+
+        if aws_access_key_id and aws_secret_access_key:
+
+            self.session = boto3.Session(aws_access_key_id=aws_access_key_id,
+                                         aws_secret_access_key=aws_secret_access_key)
+
+        else:
+            self.session = boto3.Session()
+
+
+
[docs]class S3(object): + """ + Instantiate the S3 class. + + `Args:` + aws_access_key_id: str + The AWS access key id. Not required if the ``AWS_ACCESS_KEY_ID`` env variable + is set. + aws_secret_access_key: str + The AWS secret access key. Not required if the ``AWS_SECRET_ACCESS_KEY`` env + variable is set. + `Returns:` + S3 class. + """ + + def __init__(self, aws_access_key_id=None, aws_secret_access_key=None): + + self.aws = AWSConnection(aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) + + self.s3 = self.aws.session.resource('s3') + """Boto3 API Session Resource object. Use for more advanced boto3 features.""" + + self.client = self.s3.meta.client + """Boto3 API Session client object. Use for more advanced boto3 features.""" + +
[docs] def list_buckets(self): + """ + List all buckets to which you have access. + + `Returns:` + list + """ + + return [bucket.name for bucket in self.s3.buckets.all()]
+ +
[docs] def bucket_exists(self, bucket): + """ + Determine if a bucket exists and you have access to it. + + `Args:` + bucket: str + The bucket name + `Returns:` + boolean + ``True`` if the bucket exists and ``False`` if not. + """ + + try: + # If we can list the keys, the bucket definitely exists. We do this check since + # it will account for buckets that live on other AWS accounts and that we + # have access to. + self.list_keys(bucket) + return True + except Exception: + pass + + return bucket in self.list_buckets()
+ +
[docs] def list_keys(self, bucket, prefix=None, suffix=None, regex=None,
+                  date_modified_before=None, date_modified_after=None,
+                  **kwargs):
+        """
+        List the keys in a bucket, along with extra info about each one.
+
+        `Args:`
+            bucket: str
+                The bucket name
+            prefix: str
+                Limits the response to keys that begin with the specified prefix.
+            suffix: str
+                Limits the response to keys that end with the specified suffix
+            regex: str
+                Limits the response to keys that match a regex pattern
+            date_modified_before: datetime.datetime
+                Limits the response to keys with date modified before
+            date_modified_after: datetime.datetime
+                Limits the response to keys with date modified after
+            kwargs:
+                Additional arguments for the S3 API call. See `AWS ListObjectsV2 documentation
+                <https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_objects_v2>`_
+                for more info.
+        `Returns:`
+            dict
+                Dict mapping the keys to info about each key. The info includes 'LastModified',
+                'Size', and 'Owner'.
+        """
+
+        keys_dict = dict()
+        logger.debug(f'Fetching keys in {bucket} bucket')
+
+        continuation_token = None
+
+        while True:
+            args = {'Bucket': bucket}
+            if prefix:
+                args['Prefix'] = prefix
+            if continuation_token:
+                args['ContinuationToken'] = continuation_token
+            args.update(kwargs)
+
+            resp = self.client.list_objects_v2(**args)
+
+            for key in resp.get('Contents', []):
+
+                # Match suffix
+                if suffix and not key['Key'].endswith(suffix):
+                    continue
+
+                # Regex matching
+                if regex and not bool(re.search(regex, key['Key'])):
+                    continue
+
+                # Match timestamp parsing
+                if date_modified_before and not key['LastModified'] < date_modified_before:
+                    continue
+
+                if date_modified_after and not key['LastModified'] > date_modified_after:
+                    continue
+
+                # Convert date to iso string
+                key['LastModified'] = key['LastModified'].isoformat()
+
+                # Add to output dict
+                keys_dict[key.get('Key')] = key
+
+            # If more than 1000 results, continue with token
+            if resp.get('NextContinuationToken'):
+                continuation_token = resp['NextContinuationToken']
+            else:
+                break
+
+        logger.debug(f'Retrieved {len(keys_dict)} keys')
+        return keys_dict
+ +
[docs] def key_exists(self, bucket, key): + """ + Determine if a key exists in a bucket. + + `Args:` + bucket: str + The bucket name + key: str + The object key + `Returns:` + boolean + ``True`` if key exists and ``False`` if not. + """ + + key_count = len(self.list_keys(bucket, prefix=key)) + + if key_count > 0: + logger.debug(f'Found {key} in {bucket}.') + return True + else: + logger.debug(f'Did not find {key} in {bucket}.') + return False
+ +
[docs] def create_bucket(self, bucket): + """ + Create an s3 bucket. + + .. warning:: + S3 has a limit on the number of buckets you can create in an AWS account, and + that limit is fairly low (typically 100). If you are creating buckets frequently, + you may be mis-using S3, and should consider using the same bucket for multiple tasks. + There is no limit on the number of objects in a bucket. + See `AWS bucket restrictions + <https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html>`_ for more + info. + + .. warning:: + S3 bucket names are *globally* unique. So when creating a new bucket, + the name can't collide with any existing bucket names. If the provided name does + collide, you'll see errors like `IllegalLocationConstraintException` or + `BucketAlreadyExists`. + + `Args:` + bucket: str + The name of the bucket to create + `Returns:` + ``None`` + """ + + self.client.create_bucket(Bucket=bucket)
+ +
[docs] def put_file(self, bucket, key, local_path, acl='bucket-owner-full-control', **kwargs): + """ + Uploads an object to an S3 bucket + + `Args:` + bucket: str + The bucket name + key: str + The object key + local_path: str + The local path of the file to upload + acl: str + The S3 permissions on the file + kwargs: + Additional arguments for the S3 API call. See `AWS Put Object documentation + <https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html>`_ for more + info. + """ + + self.client.upload_file(local_path, bucket, key, ExtraArgs={'ACL': acl, **kwargs})
+ +
[docs] def remove_file(self, bucket, key): + """ + Deletes an object from an S3 bucket + + `Args:` + bucket: str + The bucket name + key: str + The object key + `Returns:` + ``None`` + """ + + self.client.delete_object(Bucket=bucket, Key=key)
+ +
[docs] def get_file(self, bucket, key, local_path=None, **kwargs): + """ + Download an object from S3 to a local file + + `Args:` + local_path: str + The local path where the file will be downloaded. If not specified, a temporary + file will be created and returned, and that file will be removed automatically + when the script is done running. + bucket: str + The bucket name + key: str + The object key + kwargs: + Additional arguments for the S3 API call. See `AWS download_file documentation + <https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.download_file>`_ + for more info. + + `Returns:` + str + The path of the new file + """ + + if not local_path: + local_path = files.create_temp_file_for_path(key) + + self.s3.Object(bucket, key).download_file(local_path, ExtraArgs=kwargs) + + return local_path
+ +
[docs] def get_url(self, bucket, key, expires_in=3600): + """ + Generates a presigned url for an s3 object. + + `Args:` + bucket: str + The bucket name + key: str + The object name + expires_in: int + The time, in seconds, until the url expires + `Returns:` + Url: + A link to download the object + """ + + return self.client.generate_presigned_url(ClientMethod='get_object', + Params={'Bucket': bucket, + 'Key': key}, + ExpiresIn=expires_in)
+ +
[docs] def transfer_bucket(self, origin_bucket, origin_key, destination_bucket,
+                        destination_key=None, suffix=None, regex=None,
+                        date_modified_before=None, date_modified_after=None,
+                        public_read=False, remove_original=False, **kwargs):
+        """
+        Transfer files between s3 buckets.
+
+        `Args:`
+            origin_bucket: str
+                The origin bucket
+            origin_key: str
+                The origin file or prefix
+            destination_bucket: str
+                The destination bucket
+            destination_key: str
+                If `None` then will retain the `origin key`. If set to prefix will move all
+                to new prefix
+            suffix: str
+                Limits the response to keys that end with the specified suffix
+            regex: str
+                Limits the response to keys that match a regex pattern
+            date_modified_before: datetime.datetime
+                Limits the response to keys with date modified before
+            date_modified_after: datetime.datetime
+                Limits the response to keys with date modified after
+            public_read: bool
+                If the keys should be set to `public-read`
+            remove_original: bool
+                If the original keys should be removed after transfer
+            kwargs:
+                Additional arguments for the S3 API call. See `AWS copy documentation
+                <https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.copy>`_
+                for more info.
+        `Returns:`
+            ``None``
+        """
+        # If prefix, get all files for the prefix
+        if origin_key.endswith('/'):
+            resp = self.list_keys(
+                origin_bucket,
+                prefix=origin_key,
+                suffix=suffix,
+                regex=regex,
+                date_modified_before=date_modified_before,
+                date_modified_after=date_modified_after
+            )
+            key_list = [value['Key'] for value in resp.values()]
+        else:
+            key_list = [origin_key]
+
+        for key in key_list:
+            # If destination_key is prefix, replace
+            if destination_key and destination_key.endswith('/'):
+                dest_key = key.replace(origin_key, destination_key)
+
+            # If single destination, use destination key
+            elif destination_key:
+                dest_key = destination_key
+
+            # Else use key from original source
+            else:
+                dest_key = key
+
+            copy_source = {'Bucket': origin_bucket, 'Key': key}
+            self.client.copy(copy_source, destination_bucket, dest_key, ExtraArgs=kwargs)
+
+            # Remove the key that was just copied, not the (possibly prefix) origin_key
+            if remove_original:
+                try:
+                    self.remove_file(origin_bucket, key)
+                except Exception as e:
+                    logger.error('Failed to delete original key: ' + str(e))
+
+            # Set the ACL per key as it lands in the destination bucket
+            if public_read:
+                object_acl = self.s3.ObjectAcl(destination_bucket, dest_key)
+                object_acl.put(ACL='public-read')
+
+        logger.info(f'Finished syncing {len(key_list)} keys')
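Since ``transfer_bucket`` reuses ``list_keys`` for prefix transfers, the same filters apply to both. A minimal sketch, with hypothetical bucket and prefix names:

.. code-block:: python

    from parsons.aws.s3 import S3

    # Credentials can also come from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
    s3 = S3()

    # Inspect the .csv keys under a prefix before moving anything
    keys = s3.list_keys('my-origin-bucket', prefix='exports/', suffix='.csv')
    print(len(keys))

    # Mirror those keys into an archive bucket and delete the originals
    s3.transfer_bucket('my-origin-bucket', 'exports/', 'my-archive-bucket',
                       destination_key='archive/', suffix='.csv',
                       remove_original=True)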
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/civis/civis.html b/docs/html/_modules/parsons/civis/civis.html
new file mode 100644
index 0000000000..3b98a7c48d
--- /dev/null
+++ b/docs/html/_modules/parsons/civis/civis.html
@@ -0,0 +1,340 @@
+parsons.civis.civis — Parsons 0.1 documentation

Source code for parsons.civis.civis

+from parsons.etl.table import Table
+import civis
+import os
+
+
[docs]class CivisClient(object):
+
+    def __init__(self, db=None, api_key=None):
+
+        # Fall back to env variables for anything not passed as a kwarg
+        if api_key is None:
+            try:
+                api_key = os.environ['CIVIS_API_KEY']
+            except KeyError:
+                raise KeyError('No Civis API key found. Please store'
+                               ' in environment variable or pass as an'
+                               ' argument.')
+
+        if db is None:
+            try:
+                db = os.environ['CIVIS_DATABASE']
+            except KeyError:
+                raise KeyError('No Civis Database kwarg found. Please store'
+                               ' in environment variable or pass as an'
+                               ' argument.')
+
+        self.db = db
+        # Store the key on the instance (query() reads self.api_key) and in the
+        # environment for the civis client library
+        self.api_key = api_key
+        os.environ['CIVIS_API_KEY'] = api_key
[docs] def query(self, sql, preview_rows=10, polling_interval=None, hidden=True,
+              wait=True):
+
+        """Execute a SQL statement as a Civis query.
+
+        Run a query that may return no results or where only a small
+        preview is required. To execute a query that returns a large number
+        of rows, see :func:`~civis.io.read_civis_sql`.
+
+        `Parameters`
+
+        sql : str
+            The SQL statement to execute.
+        preview_rows : int, optional
+            The maximum number of rows to return. No more than 100 rows can be
+            returned at once.
+        polling_interval : int or float, optional
+            Number of seconds to wait between checks for query completion.
+        hidden : bool, optional
+            If ``True`` (the default), this job will not appear in the Civis UI.
+        wait: boolean
+            If ``True``, will wait for query to finish executing before exiting
+            the method.
+
+        `Returns`
+
+        Parsons Table
+            See :ref:`parsons-table` for output options.
+        """
+
+        # Pass the polling_interval and hidden arguments through rather than
+        # silently discarding them
+        fut = civis.io.query_civis(sql, self.db, api_key=self.api_key,
+                                   preview_rows=preview_rows,
+                                   polling_interval=polling_interval, hidden=hidden)
+
+        if not wait:
+            return None
+
+        result = fut.result()
+
+        if result['result_rows'] is None:
+            return None
+
+        result['result_rows'].insert(0, result['result_columns'])
+
+        return Table.from_columns(result['result_rows'])
+ +
[docs] def table_import(self, table_obj, table, max_errors=None,
+                     existing_table_rows='fail', diststyle=None, distkey=None,
+                     sortkey1=None, sortkey2=None, wait=True, **civisargs):
+        """Write the table to a Civis Redshift cluster. Additional keyword
+        arguments can be passed to `civis.io.dataframe_to_civis() <https://civis-python.readthedocs.io/en/v1.9.0/generated/civis.io.dataframe_to_civis.html#civis.io.dataframe_to_civis>`_
+
+        table_obj: obj
+            A Parsons Table object
+
+        table: str
+            The schema and table you want to upload to. E.g.,
+            'scratch.table'. Schemas or tablenames with periods
+            must be double quoted, e.g. 'scratch."my.table"'.
+
+        api_key: str
+            Your Civis API key. If not given, the CIVIS_API_KEY
+            environment variable will be used.
+
+        max_errors: int
+            The maximum number of rows with errors to remove from
+            the import before failing.
+
+        existing_table_rows: str
+            The behaviour if a table with the requested name already
+            exists. One of `'fail'`, `'truncate'`, `'append'` or `'drop'`.
+            Defaults to `'fail'`.
+
+        diststyle: str
+            The distribution style for the table. One of `'even'`, `'all'` or
+            `'key'`.
+
+        distkey: str
+            The column to use as the distkey for the table.
+
+        sortkey1: str
+            The column to use as the sortkey for the table.
+
+        sortkey2: str
+            The second column in a compound sortkey for the table.
+
+        wait: boolean
+            Wait for write job to complete before exiting method.
+        """
+
+        fut = civis.io.dataframe_to_civis(table_obj.to_df(), database=self.db,
+                                          table=table, max_errors=max_errors,
+                                          existing_table_rows=existing_table_rows,
+                                          diststyle=diststyle, distkey=distkey,
+                                          sortkey1=sortkey1, sortkey2=sortkey2,
+                                          headers=True, **civisargs)
+
+        if wait:
+            fut.result()
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/civis/civisclient.html b/docs/html/_modules/parsons/civis/civisclient.html
new file mode 100644
index 0000000000..a5241159d3
--- /dev/null
+++ b/docs/html/_modules/parsons/civis/civisclient.html
@@ -0,0 +1,363 @@
+parsons.civis.civisclient — Parsons 0.5 documentation

Source code for parsons.civis.civisclient

+import civis
+from parsons.etl.table import Table
+from parsons.utilities import check_env
+
+
+
[docs]class CivisClient(object): + """ + Instantiate the Civis class. + + `Args:` + db: str or int + The Civis Redshift database. Can be a database id or the name of the + database. + api_key: str + The Civis api key. + **kwargs: args + Option settings for the client that are `described in the documentation <https://civis-python.readthedocs.io/en/stable/client.html#civis.APIClient>`_. + `Returns:` + Civis class + """ # noqa: E501 + + def __init__(self, db=None, api_key=None, **kwargs): + + self.db = check_env.check('CIVIS_DATABASE', db) + self.api_key = check_env.check('CIVIS_API_KEY', api_key) + self.client = civis.APIClient(api_key=api_key, **kwargs) + """ + The Civis API client. Utilize this attribute to access to lower level and more + advanced methods which might not be surfaced in Parsons. A list of the methods + can be found by reading the Civis API client `documentation <https://civis-python.readthedocs.io/en/stable/client.html>`_. + """ # noqa: E501 + +
[docs] def query(self, sql, preview_rows=10, polling_interval=None, hidden=True, wait=True): + """ + Execute a SQL statement as a Civis query. + + Run a query that may return no results or where only a small + preview is required. To execute a query that returns a large number + of rows, see :func:`~civis.io.read_civis_sql`. + + `Args` + sql: str + The SQL statement to execute. + preview_rows: int, optional + The maximum number of rows to return. No more than 100 rows can be + returned at once. + polling_interval: int or float, optional + Number of seconds to wait between checks for query completion. + hidden: bool, optional + If ``True`` (the default), this job will not appear in the Civis UI. + wait: boolean + If ``True``, will wait for query to finish executing before exiting + the method. + `Returns` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + fut = civis.io.query_civis(sql, self.db, preview_rows=preview_rows, + polling_interval=polling_interval, hidden=hidden) + + if not wait: + + return None + + result = fut.result() + + if result['result_rows'] is None: + + return None + + result['result_rows'].insert(0, result['result_columns']) + + return Table(result['result_rows'])
+ +
[docs] def table_import(self, table_obj, table, max_errors=None,
+                     existing_table_rows='fail', diststyle=None, distkey=None,
+                     sortkey1=None, sortkey2=None, wait=True, **civisargs):
+        """
+        Write the table to a Civis Redshift cluster. Additional keyword
+        arguments can be passed to `civis.io.dataframe_to_civis() <https://civis-python.readthedocs.io/en/v1.9.0/generated/civis.io.dataframe_to_civis.html#civis.io.dataframe_to_civis>`_ # noqa: E501
+
+        `Args`
+            table_obj: obj
+                A Parsons Table object
+            table: str
+                The schema and table you want to upload to. E.g., 'scratch.table'. Schemas
+                or tablenames with periods must be double quoted, e.g. 'scratch."my.table"'.
+            api_key: str
+                Your Civis API key. If not given, the CIVIS_API_KEY environment variable will be
+                used.
+            max_errors: int
+                The maximum number of rows with errors to remove from the import before failing.
+            existing_table_rows: str
+                The behaviour if a table with the requested name already exists. One of
+                `'fail'`, `'truncate'`, `'append'` or `'drop'`. Defaults to `'fail'`.
+            diststyle: str
+                The distribution style for the table. One of `'even'`, `'all'` or `'key'`.
+            distkey: str
+                The column to use as the distkey for the table.
+            sortkey1: str
+                The column to use as the sortkey for the table.
+            sortkey2: str
+                The second column in a compound sortkey for the table.
+            wait: boolean
+                Wait for write job to complete before exiting method.
+        `Returns`
+            ``None``
+        """  # noqa: E501,E261
+
+        fut = civis.io.dataframe_to_civis(table_obj.to_dataframe(), database=self.db,
+                                          table=table, max_errors=max_errors,
+                                          existing_table_rows=existing_table_rows,
+                                          diststyle=diststyle, distkey=distkey,
+                                          sortkey1=sortkey1, sortkey2=sortkey2,
+                                          headers=True, **civisargs)
+
+        if wait:
+            fut.result()
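The two methods above cover the round trip: ``table_import`` pushes a Parsons Table into Redshift and ``query`` reads results back out as a Parsons Table. A minimal sketch, assuming valid Civis credentials and a hypothetical scratch table:

.. code-block:: python

    from parsons import Table
    from parsons.civis.civisclient import CivisClient

    # db and api_key can also come from the CIVIS_DATABASE / CIVIS_API_KEY env variables
    civis_client = CivisClient(db='my-database')

    tbl = Table([{'id': 1, 'name': 'Jane'}])
    civis_client.table_import(tbl, 'scratch.my_table', existing_table_rows='drop')

    preview = civis_client.query('SELECT * FROM scratch.my_table LIMIT 10')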
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/databases/google_sheets.html b/docs/html/_modules/parsons/databases/google_sheets.html
new file mode 100644
index 0000000000..46c540aa44
--- /dev/null
+++ b/docs/html/_modules/parsons/databases/google_sheets.html
@@ -0,0 +1,439 @@
+parsons.databases.google_sheets — Parsons 0.1 documentation

Source code for parsons.databases.google_sheets

+import os
+import json
+import gspread
+from oauth2client.service_account import ServiceAccountCredentials
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class GoogleSheets(object):
+
+    def __init__(self, google_keyfile_dict=None):
+        """
+        A connector for Google Sheets, handling data import and export.
+
+        `Args:`
+            google_keyfile_dict: dict
+                A dictionary of Google Drive API credentials, parsed from JSON provided
+                by the Google Developer Console. Required if env variable
+                ``GOOGLE_DRIVE_CREDENTIALS`` is not populated.
+        """
+
+        scope = [
+            'https://spreadsheets.google.com/feeds',
+            'https://www.googleapis.com/auth/drive',
+        ]
+
+        # Use the kwarg if given, otherwise fall back to the env variable
+        if google_keyfile_dict is None:
+            try:
+                keyfile_json = os.environ['GOOGLE_DRIVE_CREDENTIALS']
+            except KeyError as error:
+                logger.error("Google credentials missing. Must be specified as an env var or kwarg")
+                raise error
+
+            self.google_keyfile_dict = json.loads(keyfile_json)
+        else:
+            self.google_keyfile_dict = google_keyfile_dict
+
+        credentials = ServiceAccountCredentials.from_json_keyfile_dict(
+            self.google_keyfile_dict, scope
+        )
+        self.gspread_client = gspread.authorize(credentials)
+
+    def _get_sheet(self, spreadsheet_id, sheet_index=0):
+        return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(sheet_index)
[docs] def get_sheet_index_with_title(self, spreadsheet_id, title): + """ + Get the first sheet in a Google spreadsheet with the given title. + + `Args:` + spreadsheet_id: str + The ID of the spreadsheet (Tip: Get this from the spreadsheet URL) + title: str + The sheet title + + `Returns:` + str + The sheet index + """ + + sheets = self.gspread_client.open_by_key(spreadsheet_id).worksheets() + for index, sheet in enumerate(sheets): + if sheet.title == title: + return index + raise ValueError(f"Couldn't find sheet with title {title}")
+ +
[docs] def read_sheet(self, spreadsheet_id, sheet_index=0): + """ + Create a ```parsons table``` from a sheet in a Google spreadsheet, given the sheet index. + + `Args:` + spreadsheet_id: str + The ID of the spreadsheet (Tip: Get this from the spreadsheet URL) + sheet_index: int (optional) + The index of the desired worksheet + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + sheet = self._get_sheet(spreadsheet_id, sheet_index) + records = sheet.get_all_records() + + return Table(records)
+ +
[docs] def read_sheet_with_title(self, spreadsheet_id, title): + """ + Create a ```parsons table``` from a sheet in Google spreadsheet, given the sheet title. + + `Args:` + spreadsheet_id: str + The ID of the spreadsheet (Tip: Get this from the spreadsheet URL) + title: str + The sheet title + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + index = self.get_sheet_index_with_title(spreadsheet_id, title) + return self.read_sheet(spreadsheet_id, index)
+ +
[docs] def create_spreadsheet(self, title, editor_email=None): + """ + Create a Google spreadsheet from a Parsons table. Optionally shares the new doc with + the given email address. + + `Args:` + title: str + The human-readable title of the new spreadsheet + editor_email: str (optional) + Email address which should be given permissions on this spreadsheet + + `Returns:` + str + The spreadsheet ID + """ + + spreadsheet = self.gspread_client.create(title) + + if editor_email: + self.gspread_client.insert_permission( + spreadsheet.id, + editor_email, + perm_type='user', + role='writer', + ) + + return spreadsheet.id
+ +
[docs] def delete_spreadsheet(self, spreadsheet_id): + """ + Deletes a Google spreadsheet. + + `Args:` + spreadsheet_id: str + The ID of the spreadsheet (Tip: Get this from the spreadsheet URL) + """ + self.gspread_client.del_spreadsheet(spreadsheet_id)
+ +
[docs] def add_sheet(self, spreadsheet_id, title=None, rows=100, cols=25):
+        """
+        Adds a sheet to a Google spreadsheet.
+
+        `Args:`
+            spreadsheet_id: str
+                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+            title: str (optional)
+                The title of the new worksheet
+            rows: int
+                Number of rows
+            cols: int
+                Number of columns
+
+        `Returns:`
+            int
+                The index of the new sheet
+        """
+        spreadsheet = self.gspread_client.open_by_key(spreadsheet_id)
+        spreadsheet.add_worksheet(title, rows, cols)
+        sheet_count = len(spreadsheet.worksheets())
+        return (sheet_count - 1)
+ +
[docs] def append_to_sheet(self, spreadsheet_id, table, sheet_index=0): + """ + Append data from a Parsons table to a Google sheet. Note that the table's columns are + ignored, as we'll be keeping whatever header row already exists in the Google sheet. + + `Args:` + spreadsheet_id: str + The ID of the spreadsheet (Tip: Get this from the spreadsheet URL) + table: obj + Parsons table + sheet_index: int (optional) + The index of the desired worksheet + """ + + sheet = self._get_sheet(spreadsheet_id, sheet_index) + + # Grab the existing data, so we can figure out where to start adding new data as a batch. + # TODO Figure out a way to do a batch append without having to read the whole sheet first. + # Maybe use gspread's low-level batch_update(). + existing_table = self.read_sheet(spreadsheet_id, sheet_index) + + cells = [] + for row_num, row in enumerate(table.data): + for col_num, cell in enumerate(row): + # Add 2 to allow for the header row, and for google sheets indexing starting at 1 + sheet_row_num = existing_table.num_rows + row_num + 2 + cells.append(gspread.Cell(sheet_row_num, col_num + 1, row[col_num])) + + # Update the data in one batch + sheet.update_cells(cells)
+ +
+    def overwrite_sheet(self, spreadsheet_id, table, sheet_index=0):
+        """
+        Replace the data in a Google sheet with a Parsons table, using the table's columns as the
+        first row.
+
+        `Args:`
+            spreadsheet_id: str
+                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+            table: obj
+                Parsons table
+            sheet_index: int (optional)
+                The index of the desired worksheet
+        """
+
+        sheet = self._get_sheet(spreadsheet_id, sheet_index)
+        sheet.clear()
+
+        # Add header row
+        sheet.append_row(table.columns)
+
+        cells = []
+        for row_num, row in enumerate(table.data):
+            for col_num, cell in enumerate(row):
+                # We start at row #2 to keep room for the header row we added above
+                cells.append(gspread.Cell(row_num + 2, col_num + 1, cell))
+
+        # Update the data in one batch
+        sheet.update_cells(cells)
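A sketch of the two write paths together (hypothetical IDs and data; assumes a Parsons ``Table`` built from a list of dicts):

.. code-block:: python

    from parsons import GoogleSheets, Table

    sheets = GoogleSheets()
    spreadsheet_id = '1aBcD-hypothetical-spreadsheet-id'

    # Replace the first worksheet's contents, writing the table's columns as row 1
    sheets.overwrite_sheet(spreadsheet_id, Table([{'first': 'Beatrice', 'last': "O'Brady"}]))

    # Append further rows beneath whatever is already in the sheet
    sheets.append_to_sheet(spreadsheet_id, Table([{'first': 'Allen', 'last': 'Smith'}]))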
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/databases/redshift.html b/docs/html/_modules/parsons/databases/redshift.html
new file mode 100644
index 0000000000..1fa3940cd9
--- /dev/null
+++ b/docs/html/_modules/parsons/databases/redshift.html
@@ -0,0 +1,624 @@
+parsons.databases.redshift — Parsons 0.1 documentation
Source code for parsons.databases.redshift

+from parsons.etl.table import Table
+from parsons.databases.sqldb import SQLDatabase
+from parsons.databases.utilities import RedshiftCreateTable, ManifestGenerator, RedshiftCopyTable
+import psycopg2
+import psycopg2.extras
+import os
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
+class Redshift(ManifestGenerator, RedshiftCreateTable, RedshiftCopyTable, SQLDatabase):
+
+    def __init__(self, username=None, password=None, host=None, db=None, port=5439,
+                 timeout=10):
+        """
+        A Redshift class to connect to the database.
+
+        Args:
+            username: str
+                Required if env variable ``REDSHIFT_USERNAME`` not populated
+            password: str
+                Required if env variable ``REDSHIFT_PASSWORD`` not populated
+            host: str
+                Required if env variable ``REDSHIFT_HOST`` not populated
+            db: str
+                Required if env variable ``REDSHIFT_DB`` not populated
+            port: int
+                Defaults to standard Redshift port
+            timeout: int
+                Seconds to timeout if connection not established
+        """
+
+        # Fall back to environment variables for any connection info not passed in.
+        # Note: os.environ raises a KeyError (not a TypeError) on a missing variable.
+        try:
+            self.username = username or os.environ['REDSHIFT_USERNAME']
+            self.password = password or os.environ['REDSHIFT_PASSWORD']
+            self.host = host or os.environ['REDSHIFT_HOST']
+            self.db = db or os.environ['REDSHIFT_DB']
+            self.port = port or os.environ['REDSHIFT_PORT']
+        except KeyError as error:
+            print(error)
+            print("Connection info missing. Must include as kwarg or "
+                  "env variable.")
+
+        self.timeout = timeout
+
+        # Petl needs this to create tables
+        self.dialect = 'postgresql'
+
+    def connection(self):
+        """
+        Generate a Redshift connection
+
+        `Returns:`
+            Psycopg2 `connection` and `cursor` objects
+        """
+
+        # Create a psycopg2 connection and cursor
+        conn = psycopg2.connect(user=self.username, password=self.password,
+                                host=self.host, dbname=self.db, port=self.port,
+                                connect_timeout=self.timeout)
+
+        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
+
+        return conn, cur
+ +
+    def query(self, sql, commit=True):
+        """
+        Execute a query against the Redshift database.
+
+        `Args:`
+            sql: str
+                A valid SQL statement
+            commit: boolean
+                Whether to commit the transaction.
+
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        # To Do: Have it return an ordered dict to return the
+        # rows in the correct order
+
+        conn, cur = self.connection()
+
+        cur.execute(sql)
+        logger.info(f'SQL Query: {cur.query}')
+        if commit:
+            conn.commit()
+
+        # If the cursor is empty, don't cause an error
+        if not cur.description:
+            logger.info('0 rows returned')
+            response = None
+
+        else:
+            # Grab the header and combine with the data
+            data = [[i[0] for i in cur.description]]
+            data = data + cur.fetchall()
+            response = Table.from_columns([list(x) for x in data])
+            logger.info(f'{response.num_rows} rows returned.')
+
+        # Close out the connections
+        cur.close()
+        conn.close()
+
+        return response
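A minimal sketch of querying with this class (the table name is hypothetical; credentials are assumed to come from the ``REDSHIFT_*`` environment variables):

.. code-block:: python

    from parsons import Redshift

    rs = Redshift()
    tbl = rs.query('select voter_id, name from tmc.cool_table limit 10')
    if tbl is not None:
        print(tbl.num_rows)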
+ +
+    def copy_s3(self, table_name, bucket, key, manifest=False, data_type='csv',
+                delimiter=',', if_exists='fail', max_errors=0, json_path=None,
+                distkey=None, sortkey=None, padding=None, varchar_max=None,
+                statupdate=True, compudate=True, ignoreheader=1, acceptanydate=True,
+                dateformat='auto', timeformat='auto', emptyasnull=True,
+                blanksasnull=True, acceptinvchars=True, aws_access_key_id=None,
+                aws_secret_access_key=None):
+        """
+        Copy a file from s3 to Redshift.
+
+        `Args:`
+            table_name: str
+                The schema and table name (``tmc.cool_table``) to copy the file into.
+            bucket: str
+                The s3 bucket where the file or manifest is located.
+            key: str
+                The key of the file or manifest in the s3 bucket.
+            manifest: boolean
+                Whether the key provided is a manifest file.
+            data_type: str
+                The data type of the files. Either ``csv`` or ``json``
+            delimiter: str
+                The delimiter of the ``csv``. Ignored if data_type is ``json``.
+            if_exists: str
+                If the table already exists, either ``fail``, ``append`` or ``drop``
+                the table.
+            max_errors: int
+                The maximum number of rows that can error and be skipped before
+                the copy job fails.
+            json_path: str
+                An optional json path file path. Only relevant if ``data_type=json``. If
+                ``None``, will be set to ``auto``.
+            distkey: str
+                The column name of the distkey
+            sortkey: str
+                The column name of the sortkey
+            padding: float
+                A percentage padding to add to varchar columns if creating a new table. This is
+                helpful to add a buffer for future copies in which the data might be wider.
+            varchar_max: list
+                A list of columns in which to set the width of the varchar column to 65,535
+                characters.
+            statupdate: boolean
+                Governs automatic computation and refresh of optimizer statistics at the end
+                of a successful COPY command.
+            compudate: boolean
+                Controls whether compression encodings are automatically applied during a COPY.
+            ignoreheader: int
+                The number of header rows to skip
+            acceptanydate: boolean
+                Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be
+                loaded without generating an error.
+            emptyasnull: boolean
+                Indicates that Amazon Redshift should load empty char and varchar fields
+                as ``NULL``.
+            blanksasnull: boolean
+                Loads blank varchar fields, which consist of only white space characters,
+                as ``NULL``.
+            acceptinvchars: boolean
+                Enables loading of data into VARCHAR columns even if the data contains
+                invalid UTF-8 characters.
+            dateformat: str
+                Set the date format. Defaults to ``auto``.
+            timeformat: str
+                Set the time format. Defaults to ``auto``.
+            aws_access_key_id:
+                An AWS access key granted to the bucket where the file is located. Not required
+                if keys are stored as environment variables.
+            aws_secret_access_key:
+                An AWS secret access key granted to the bucket where the file is located. Not
+                required if keys are stored as environment variables.
+
+        `Returns:`
+            Parsons Table or ``None``
+                See :ref:`parsons-table` for output options.
+        """
+
+        # Check if table exists
+        if not self.table_exists(table_name) or if_exists == 'drop':
+
+            # If the table exists and kwarg set to drop, drop it.
+            if if_exists == 'drop':
+                self.query('drop table if exists {}'.format(table_name), commit=False)
+                logger.info(f'{table_name} dropped')
+
+            # Grab the object from s3
+            from parsons.aws.s3 import S3
+            s3 = S3(aws_access_key_id=aws_access_key_id,
+                    aws_secret_access_key=aws_secret_access_key)
+            tbl = s3.download_csv(bucket, key)
+
+            # Create the table
+            sql = self.create_statement(tbl, table_name, padding=padding,
+                                        distkey=distkey, sortkey=sortkey,
+                                        varchar_max=varchar_max)
+
+            self.query(sql)
+
+        # Fail the job if the table exists and the kwarg is set to fail
+        elif if_exists == 'fail':
+
+            raise ValueError('{} table already exists'.format(table_name))
+
+        else:
+            pass
+
+        # Copy the table
+        copy_sql = self.copy_statement(table_name, bucket, key, manifest=manifest,
+                                       data_type=data_type, delimiter=delimiter,
+                                       max_errors=max_errors, json_path=json_path,
+                                       statupdate=statupdate, compudate=compudate,
+                                       aws_access_key_id=aws_access_key_id,
+                                       aws_secret_access_key=aws_secret_access_key,
+                                       ignoreheader=ignoreheader, acceptanydate=acceptanydate,
+                                       emptyasnull=emptyasnull, blanksasnull=blanksasnull,
+                                       acceptinvchars=acceptinvchars, dateformat=dateformat,
+                                       timeformat=timeformat)
+
+        self.query(copy_sql)
+ +
+    def copy(self, table_obj, table_name, if_exists='fail', max_errors=0, distkey=None,
+             sortkey=None, padding=None, statupdate=False, compudate=True, acceptanydate=True,
+             emptyasnull=True, blanksasnull=True, acceptinvchars=True, dateformat='auto',
+             timeformat='auto', varchar_max=None, aws_access_key_id=None,
+             aws_secret_access_key=None):
+        """
+        Copy a parsons table object to Redshift.
+
+        `Args:`
+            table_obj: obj
+                A ``Parsons Table``.
+            table_name: str
+                The schema and table name (``tmc.cool_table``) to copy the data into.
+            if_exists: str
+                If the table already exists, either ``fail``, ``append`` or ``drop``
+                the table.
+            max_errors: int
+                The maximum number of rows that can error and be skipped before
+                the copy job fails.
+            distkey: str
+                The column name of the distkey
+            sortkey: str
+                The column name of the sortkey
+            padding: float
+                A percentage padding to add to varchar columns if creating a new table. This is
+                helpful to add a buffer for future copies in which the data might be wider.
+            varchar_max: list
+                A list of columns in which to set the width of the varchar column to 65,535
+                characters.
+            statupdate: boolean
+                Governs automatic computation and refresh of optimizer statistics at the end
+                of a successful COPY command.
+            compudate: boolean
+                Controls whether compression encodings are automatically applied during a COPY.
+            acceptanydate: boolean
+                Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be
+                loaded without generating an error.
+            emptyasnull: boolean
+                Indicates that Amazon Redshift should load empty char and varchar fields
+                as ``NULL``.
+            blanksasnull: boolean
+                Loads blank varchar fields, which consist of only white space characters,
+                as ``NULL``.
+            acceptinvchars: boolean
+                Enables loading of data into VARCHAR columns even if the data contains
+                invalid UTF-8 characters.
+            dateformat: str
+                Set the date format. Defaults to ``auto``.
+            timeformat: str
+                Set the time format. Defaults to ``auto``.
+            aws_access_key_id:
+                An AWS access key granted to the bucket where the file is located. Not required
+                if keys are stored as environment variables.
+            aws_secret_access_key:
+                An AWS secret access key granted to the bucket where the file is located. Not
+                required if keys are stored as environment variables.
+
+        `Returns:`
+            Parsons Table or ``None``
+                See :ref:`parsons-table` for output options.
+        """
+
+        # To Do:
+        # Compress the table as a gzip before copy
+        # Auto split big files to copy more quickly
+        # Consider a json for better stability
+        # Allow the user to pass in a bucket to store temporary files
+
+        bucket, key = self.temp_bucket_copy(table_obj)
+
+        try:
+
+            self.copy_s3(table_name, bucket, key, data_type='csv',
+                         delimiter=',', if_exists=if_exists, max_errors=max_errors,
+                         distkey=distkey, sortkey=sortkey, padding=padding, ignoreheader=1,
+                         varchar_max=varchar_max, statupdate=statupdate, compudate=compudate,
+                         acceptanydate=acceptanydate, dateformat=dateformat, timeformat=timeformat,
+                         blanksasnull=blanksasnull, emptyasnull=emptyasnull,
+                         acceptinvchars=acceptinvchars, aws_access_key_id=aws_access_key_id,
+                         aws_secret_access_key=aws_secret_access_key)
+            logger.info(f'{table_name} created.')
+
+            self.temp_bucket_delete(bucket)
+
+        # Delete bucket before raising error
+        except (psycopg2.ProgrammingError, ValueError) as error:
+
+            self.temp_bucket_delete(bucket)
+            raise error
+ +
+    def table_exists(self, table_name, view=True):
+        """
+        Check if a table exists in the database.
+
+        `Args:`
+            table_name: str
+                The schema and table name (``tmc.cool_table``) to check.
+            view: boolean
+                Check to see if a view exists by the same name
+
+        `Returns:`
+            boolean
+                ``True`` if the table exists and ``False`` if it does not.
+        """
+        table_name = table_name.lower().split('.')
+
+        # Check in pg_tables for the table
+        sql = """select count(*) from pg_tables where schemaname='{}' and
+                 tablename='{}';""".format(table_name[0], table_name[1])
+
+        conn, cur = self.connection()
+
+        cur.execute(sql)
+        result = cur.fetchone()[0]
+
+        # Check in pg_views for the table
+        if view:
+            sql = """select count(*) from pg_views where schemaname='{}' and
+                     viewname='{}';""".format(table_name[0], table_name[1])
+
+            cur.execute(sql)
+            result += cur.fetchone()[0]
+
+        cur.close()
+        conn.close()
+
+        # If found in either, return True
+        if result >= 1:
+            logger.info(f'{table_name[0]}.{table_name[1]} exists.')
+            return True
+        else:
+            logger.info(f'{table_name[0]}.{table_name[1]} does NOT exist.')
+            return False
+ +
+    def generate_manifest(self, buckets, aws_access_key_id=None, aws_secret_access_key=None,
+                          mandatory=True, filter=None, manifest_bucket=None, manifest_key=None,
+                          path=None):
+        """
+        Given a list of S3 buckets, generate a .manifest file (JSON format).
+
+        You can pass in folders to the bucket argument, and this will
+        only grab keys in those folders.
+
+        AWS keys are not required if the ``AWS_ACCESS_KEY_ID`` and
+        ``AWS_SECRET_ACCESS_KEY`` environment variables are set.
+
+        `Args:`
+
+            buckets: list or str
+                A list of buckets or single bucket from which to generate manifest
+            aws_access_key_id: str
+                AWS access key id to access S3 bucket
+            aws_secret_access_key: str
+                AWS secret access key to access S3 bucket
+            mandatory: boolean
+                The mandatory flag indicates whether the Redshift COPY should
+                terminate if the file does not exist.
+            filter: str
+                Optional filter for manifest entries
+            manifest_bucket: str
+                Optional bucket to write manifest file.
+            manifest_key: str
+                Optional key name for S3 bucket to write file
+            path: str
+                Optional local path to write the manifest file
+
+        `Returns:`
+            ``dict`` of manifest
+        """
+
+        # Delegate to the ManifestGenerator mixin; calling self.generate_manifest
+        # here would recurse forever.
+        return super().generate_manifest(buckets, mandatory=mandatory, filter=filter,
+                                         manifest_key=manifest_key, manifest_bucket=manifest_bucket,
+                                         path=path, aws_access_key_id=aws_access_key_id,
+                                         aws_secret_access_key=aws_secret_access_key)
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/databases/redshift/redshift.html b/docs/html/_modules/parsons/databases/redshift/redshift.html
new file mode 100644
index 0000000000..9cf045df92
--- /dev/null
+++ b/docs/html/_modules/parsons/databases/redshift/redshift.html
@@ -0,0 +1,1126 @@
+parsons.databases.redshift.redshift — Parsons 0.5 documentation

Source code for parsons.databases.redshift.redshift

+from parsons.etl.table import Table
+from parsons.databases.redshift.rs_copy_table import RedshiftCopyTable
+from parsons.databases.redshift.rs_create_table import RedshiftCreateTable
+from parsons.databases.redshift.rs_table_utilities import RedshiftTableUtilities
+from parsons.databases.redshift.rs_schema import RedshiftSchema
+from parsons.databases.table import BaseTable
+from parsons.utilities import files
+import psycopg2
+import psycopg2.extras
+import os
+import logging
+import json
+import pickle
+import petl
+from contextlib import contextmanager
+import datetime
+import random
+
+# Max number of rows that we query at a time, so we can avoid loading huge
+# data sets into memory.
+# 100k rows per batch at ~1k bytes each = ~100MB per batch.
+QUERY_BATCH_SIZE = 100000
+
+logger = logging.getLogger(__name__)
+
+
+
+class Redshift(RedshiftCreateTable, RedshiftCopyTable, RedshiftTableUtilities, RedshiftSchema):
+    """
+    A Redshift class to connect to the database.
+
+    Args:
+        username: str
+            Required if env variable ``REDSHIFT_USERNAME`` not populated
+        password: str
+            Required if env variable ``REDSHIFT_PASSWORD`` not populated
+        host: str
+            Required if env variable ``REDSHIFT_HOST`` not populated
+        db: str
+            Required if env variable ``REDSHIFT_DB`` not populated
+        port: int
+            Required if env variable ``REDSHIFT_PORT`` not populated. Port 5439 is typical.
+        timeout: int
+            Seconds to timeout if connection not established
+        s3_temp_bucket: str
+            Name of the S3 bucket that will be used for storing data during bulk transfers.
+            Required if you intend to perform bulk data transfers (eg. the copy_s3 method),
+            and env variable ``S3_TEMP_BUCKET`` is not populated.
+        aws_access_key_id: str
+            The default AWS access key id for copying data from S3 into Redshift
+            when running copy/upsert/etc methods.
+            This will default to environment variable AWS_ACCESS_KEY_ID.
+        aws_secret_access_key: str
+            The default AWS secret access key for copying data from S3 into Redshift
+            when running copy/upsert/etc methods.
+            This will default to environment variable AWS_SECRET_ACCESS_KEY.
+        iam_role: str
+            AWS IAM Role ARN string -- an optional, different way for credentials to
+            be provided in the Redshift copy command that does not require an access key.
+    """
+
+    def __init__(self, username=None, password=None, host=None, db=None, port=None,
+                 timeout=10, s3_temp_bucket=None,
+                 aws_access_key_id=None, aws_secret_access_key=None, iam_role=None):
+
+        try:
+            self.username = username or os.environ['REDSHIFT_USERNAME']
+            self.password = password or os.environ['REDSHIFT_PASSWORD']
+            self.host = host or os.environ['REDSHIFT_HOST']
+            self.db = db or os.environ['REDSHIFT_DB']
+            self.port = port or os.environ['REDSHIFT_PORT']
+        except KeyError as error:
+            logger.error("Connection info missing. Must include as kwarg or "
+                         "env variable.")
+            raise error
+
+        self.timeout = timeout
+        self.dialect = 'redshift'
+        self.s3_temp_bucket = s3_temp_bucket or os.environ.get('S3_TEMP_BUCKET')
+        # We don't check/load the environment variables for aws_* here
+        # because the logic in S3() and rs_copy_table.py does already.
+        self.aws_access_key_id = aws_access_key_id
+        self.aws_secret_access_key = aws_secret_access_key
+        self.iam_role = iam_role
+
+    @contextmanager
+    def connection(self):
+        """
+        Generate a Redshift connection.
+        The connection is set up as a python "context manager", so it will be closed
+        automatically (and all queries committed) when the connection goes out of scope.
+
+        When using the connection, make sure to put it in a ``with`` block (necessary for
+        any context manager):
+        ``with rs.connection() as conn:``
+
+        `Returns:`
+            Psycopg2 `connection` object
+        """
+
+        # Create a psycopg2 connection and cursor
+        conn = psycopg2.connect(user=self.username, password=self.password,
+                                host=self.host, dbname=self.db, port=self.port,
+                                connect_timeout=self.timeout)
+        try:
+            yield conn
+
+            conn.commit()
+        finally:
+            conn.close()
+
+    @contextmanager
+    def cursor(self, connection):
+        cur = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
+        try:
+            yield cur
+        finally:
+            cur.close()
+
+    def query(self, sql, parameters=None):
+        """
+        Execute a query against the Redshift database. Will return ``None``
+        if the query returns zero rows.
+
+        To include python variables in your query, it is recommended to pass them as parameters,
+        following the `psycopg style <http://initd.org/psycopg/docs/usage.html#passing-parameters-to-sql-queries>`_.
+        Using the ``parameters`` argument ensures that values are escaped properly, and avoids SQL
+        injection attacks.
+
+        **Parameter Examples**
+
+        .. code-block:: python
+
+            # Note that the name contains a quote, which could break your query if not escaped
+            # properly.
+            name = "Beatrice O'Brady"
+            sql = "SELECT * FROM my_table WHERE name = %s"
+            rs.query(sql, parameters=[name])
+
+        .. code-block:: python
+
+            names = ["Allen Smith", "Beatrice O'Brady", "Cathy Thompson"]
+            placeholders = ', '.join('%s' for item in names)
+            sql = f"SELECT * FROM my_table WHERE name IN ({placeholders})"
+            rs.query(sql, parameters=names)
+
+        `Args:`
+            sql: str
+                A valid SQL statement
+            parameters: list
+                A list of python variables to be converted into SQL values in your query
+
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """  # noqa: E501
+
+        with self.connection() as connection:
+            return self.query_with_connection(sql, connection, parameters=parameters)
+
+    def query_with_connection(self, sql, connection, parameters=None, commit=True):
+        """
+        Execute a query against the Redshift database, with an existing connection.
+        Useful for batching queries together. Will return ``None`` if the query
+        returns zero rows.
+
+        `Args:`
+            sql: str
+                A valid SQL statement
+            connection: obj
+                A connection object obtained from ``redshift.connection()``
+            parameters: list
+                A list of python variables to be converted into SQL values in your query
+            commit: boolean
+                Whether to commit the transaction immediately. If ``False`` the transaction will
+                be committed when the connection goes out of scope and is closed (or you can
+                commit manually with ``connection.commit()``).
+
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        # To Do: Have it return an ordered dict to return the
+        # rows in the correct order
+
+        with self.cursor(connection) as cursor:
+
+            logger.debug(f'SQL Query: {sql}')
+            cursor.execute(sql, parameters)
+
+            if commit:
+                connection.commit()
+
+            # If the cursor is empty, don't cause an error
+            if not cursor.description:
+                logger.debug('Query returned 0 rows')
+                return None
+
+            else:
+
+                # Fetch the data in batches, and "pickle" the rows to a temp file.
+                # (We pickle rather than writing to, say, a CSV, so that we maintain
+                # all the type information for each field.)
+
+                temp_file = files.create_temp_file()
+
+                with open(temp_file, 'wb') as f:
+                    # Grab the header
+                    header = [i[0] for i in cursor.description]
+                    pickle.dump(header, f)
+
+                    while True:
+                        batch = cursor.fetchmany(QUERY_BATCH_SIZE)
+                        if not batch:
+                            break
+
+                        logger.debug(f'Fetched {len(batch)} rows.')
+                        for row in batch:
+                            pickle.dump(list(row), f)
+
+                # Load a Table from the file
+                final_tbl = Table(petl.frompickle(temp_file))
+
+                logger.debug(f'Query returned {final_tbl.num_rows} rows.')
+                return final_tbl
+
+    def copy_s3(self, table_name, bucket, key, manifest=False, data_type='csv',
+                csv_delimiter=',', compression=None, if_exists='fail', max_errors=0,
+                distkey=None, sortkey=None, padding=None, varchar_max=None,
+                statupdate=True, compupdate=True, ignoreheader=1, acceptanydate=True,
+                dateformat='auto', timeformat='auto', emptyasnull=True,
+                blanksasnull=True, nullas=None, acceptinvchars=True, truncatecolumns=False,
+                columntypes=None, specifycols=None,
+                aws_access_key_id=None, aws_secret_access_key=None, bucket_region=None):
+        """
+        Copy a file from s3 to Redshift.
+
+        `Args:`
+            table_name: str
+                The schema and table name (``tmc.cool_table``) to copy the file into.
+            bucket: str
+                The s3 bucket where the file or manifest is located.
+            key: str
+                The key of the file or manifest in the s3 bucket.
+            manifest: boolean
+                Whether the key provided is a manifest file.
+            data_type: str
+                The data type of the file. Only ``csv`` supported currently.
+            csv_delimiter: str
+                The delimiter of the ``csv``. Only relevant if data_type is ``csv``.
+            compression: str
+                If specified (``gzip``), will attempt to decompress the file.
+            if_exists: str
+                If the table already exists, either ``fail``, ``append``, ``drop``
+                or ``truncate`` the table.
+            max_errors: int
+                The maximum number of rows that can error and be skipped before
+                the job fails.
+            distkey: str
+                The column name of the distkey
+            sortkey: str
+                The column name of the sortkey
+            padding: float
+                A percentage padding to add to varchar columns if creating a new table. This is
+                helpful to add a buffer for future copies in which the data might be wider.
+            varchar_max: list
+                A list of columns in which to set the width of the varchar column to 65,535
+                characters.
+            statupdate: boolean
+                Governs automatic computation and refresh of optimizer statistics at the end
+                of a successful COPY command.
+            compupdate: boolean
+                Controls whether compression encodings are automatically applied during a COPY.
+            ignoreheader: int
+                The number of header rows to skip. Ignored if data_type is ``json``.
+            acceptanydate: boolean
+                Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be
+                loaded without generating an error.
+            emptyasnull: boolean
+                Indicates that Amazon Redshift should load empty char and varchar fields
+                as ``NULL``.
+            blanksasnull: boolean
+                Loads blank varchar fields, which consist of only white space characters,
+                as ``NULL``.
+            nullas: str
+                Loads fields that match string as NULL
+            acceptinvchars: boolean
+                Enables loading of data into VARCHAR columns even if the data contains
+                invalid UTF-8 characters.
+            dateformat: str
+                Set the date format. Defaults to ``auto``.
+            timeformat: str
+                Set the time format. Defaults to ``auto``.
+            truncatecolumns: boolean
+                If the table already exists, truncates data in columns to the appropriate number
+                of characters so that it fits the column specification. Applies only to columns
+                with a VARCHAR or CHAR data type, and rows 4 MB or less in size.
+            columntypes: dict
+                Optional map of column name to redshift column type, overriding the usual type
+                inference. You only specify the columns you want to override, eg.
+                ``columntypes={'phone': 'varchar(12)', 'age': 'int'})``.
+            specifycols: boolean
+                Adds a column list to the Redshift `COPY` command, allowing for the source table
+                in an append to have the columns out of order, and to have fewer columns with any
+                leftover target table columns filled in with the `DEFAULT` value.
+
+                This will fail if all of the source table's columns do not match a column in the
+                target table. This will also fail if the target table has an `IDENTITY`
+                column and that column name is among the source table's columns.
+            aws_access_key_id:
+                An AWS access key granted to the bucket where the file is located. Not required
+                if keys are stored as environment variables.
+            aws_secret_access_key:
+                An AWS secret access key granted to the bucket where the file is located. Not
+                required if keys are stored as environment variables.
+            bucket_region: str
+                The AWS region that the bucket is located in. This should be provided if the
+                Redshift cluster is located in a different region from the temp bucket.
+
+        `Returns:`
+            Parsons Table or ``None``
+                See :ref:`parsons-table` for output options.
+        """
+
+        with self.connection() as connection:
+
+            if self._create_table_precheck(connection, table_name, if_exists):
+                # Grab the object from s3
+                from parsons.aws.s3 import S3
+                s3 = S3(aws_access_key_id=aws_access_key_id,
+                        aws_secret_access_key=aws_secret_access_key)
+
+                local_path = s3.get_file(bucket, key)
+
+                if data_type == 'csv':
+                    tbl = Table.from_csv(local_path, delimiter=csv_delimiter)
+                else:
+                    raise TypeError("Invalid data type provided")
+
+                # Create the table
+                sql = self.create_statement(tbl, table_name, padding=padding,
+                                            distkey=distkey, sortkey=sortkey,
+                                            varchar_max=varchar_max,
+                                            columntypes=columntypes)
+
+                self.query_with_connection(sql, connection, commit=False)
+                logger.info(f'{table_name} created.')
+
+            # Copy the table
+            copy_sql = self.copy_statement(table_name, bucket, key, manifest=manifest,
+                                           data_type=data_type, csv_delimiter=csv_delimiter,
+                                           compression=compression, max_errors=max_errors,
+                                           statupdate=statupdate, compupdate=compupdate,
+                                           aws_access_key_id=aws_access_key_id,
+                                           aws_secret_access_key=aws_secret_access_key,
+                                           ignoreheader=ignoreheader, acceptanydate=acceptanydate,
+                                           emptyasnull=emptyasnull, blanksasnull=blanksasnull,
+                                           nullas=nullas, acceptinvchars=acceptinvchars,
+                                           truncatecolumns=truncatecolumns,
+                                           specifycols=specifycols,
+                                           dateformat=dateformat, timeformat=timeformat,
+                                           bucket_region=bucket_region)
+
+            self.query_with_connection(copy_sql, connection, commit=False)
+            logger.info(f'Data copied to {table_name}.')
+
+    def copy(self, tbl, table_name, if_exists='fail', max_errors=0, distkey=None,
+             sortkey=None, padding=None, statupdate=False, compupdate=True, acceptanydate=True,
+             emptyasnull=True, blanksasnull=True, nullas=None, acceptinvchars=True,
+             dateformat='auto', timeformat='auto', varchar_max=None, truncatecolumns=False,
+             columntypes=None, specifycols=None, alter_table=False,
+             aws_access_key_id=None, aws_secret_access_key=None, iam_role=None,
+             cleanup_s3_file=True, template_table=None, temp_bucket_region=None):
+        """
+        Copy a :ref:`parsons-table` to Redshift.
+
+        `Args:`
+            tbl: obj
+                A Parsons Table.
+            table_name: str
+                The destination table name (ex. ``my_schema.my_table``).
+            if_exists: str
+                If the table already exists, either ``fail``, ``append``, ``drop``
+                or ``truncate`` the table.
+            max_errors: int
+                The maximum number of rows that can error and be skipped before
+                the job fails.
+            distkey: str
+                The column name of the distkey
+            sortkey: str
+                The column name of the sortkey
+            padding: float
+                A percentage padding to add to varchar columns if creating a new table. This is
+                helpful to add a buffer for future copies in which the data might be wider.
+            varchar_max: list
+                A list of columns in which to set the width of the varchar column to 65,535
+                characters.
+            statupdate: boolean
+                Governs automatic computation and refresh of optimizer statistics at the end
+                of a successful COPY command.
+            compupdate: boolean
+                Controls whether compression encodings are automatically applied during a COPY.
+            acceptanydate: boolean
+                Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be
+                loaded without generating an error.
+            emptyasnull: boolean
+                Indicates that Amazon Redshift should load empty char and varchar fields
+                as ``NULL``.
+            blanksasnull: boolean
+                Loads blank varchar fields, which consist of only white space characters,
+                as ``NULL``.
+            nullas: str
+                Loads fields that match string as NULL
+            acceptinvchars: boolean
+                Enables loading of data into VARCHAR columns even if the data contains
+                invalid UTF-8 characters.
+            dateformat: str
+                Set the date format. Defaults to ``auto``.
+            timeformat: str
+                Set the time format. Defaults to ``auto``.
+            truncatecolumns: boolean
+                If the table already exists, truncates data in columns to the appropriate number
+                of characters so that it fits the column specification. Applies only to columns
+                with a VARCHAR or CHAR data type, and rows 4 MB or less in size.
+            columntypes: dict
+                Optional map of column name to redshift column type, overriding the usual type
+                inference. You only specify the columns you want to override, eg.
+                ``columntypes={'phone': 'varchar(12)', 'age': 'int'})``.
+            specifycols: boolean
+                Adds a column list to the Redshift `COPY` command, allowing for the source table
+                in an append to have the columns out of order, and to have fewer columns with any
+                leftover target table columns filled in with the `DEFAULT` value.
+
+                This will fail if all of the source table's columns do not match a column in the
+                target table. This will also fail if the target table has an `IDENTITY`
+                column and that column name is among the source table's columns.
+            alter_table: boolean
+                Will check if the target table varchar widths are wide enough to copy in the
+                table data. If not, will attempt to alter the table to make it wide enough. This
+                will not work with tables that have dependent views.
+            aws_access_key_id:
+                An AWS access key granted to the bucket where the file is located. Not required
+                if keys are stored as environment variables.
+            aws_secret_access_key:
+                An AWS secret access key granted to the bucket where the file is located. Not
+                required if keys are stored as environment variables.
+            iam_role: str
+                An AWS IAM Role ARN string; an alternative credential for the COPY command
+                from Redshift to S3. The IAM role must have been assigned to the Redshift
+                instance and have access to the S3 bucket.
+            cleanup_s3_file: boolean
+                The s3 upload is removed by default on cleanup. You can set to False for debugging.
+            template_table: str
+                Instead of specifying columns, columntypes, and/or inference, if there
+                is a pre-existing table that has the same columns/types, then use the template_table
+                table name as the schema for the new table.
+                Unless you set specifycols=False explicitly, a template_table will set it to True
+            temp_bucket_region: str
+                The AWS region that the temp bucket (specified by the S3_TEMP_BUCKET environment
+                variable) is located in. This should be provided if the Redshift cluster is located
+                in a different region from the temp bucket.
+
+        `Returns:`
+            Parsons Table or ``None``
+                See :ref:`parsons-table` for output options.
+        """
+
+        # Specify the columns for a copy statement.
+        if specifycols or (specifycols is None and template_table):
+            cols = tbl.columns
+        else:
+            cols = None
+
+        with self.connection() as connection:
+
+            # Check to see if the table exists. If it does not or if_exists = drop, then
+            # create the new table.
+            if self._create_table_precheck(connection, table_name, if_exists):
+                if template_table:
+                    # Copy the schema from the template table
+                    sql = f'CREATE TABLE {table_name} (LIKE {template_table})'
+                else:
+                    sql = self.create_statement(tbl, table_name, padding=padding,
+                                                distkey=distkey, sortkey=sortkey,
+                                                varchar_max=varchar_max,
+                                                columntypes=columntypes)
+                self.query_with_connection(sql, connection, commit=False)
+                logger.info(f'{table_name} created.')
+
+            # If alter_table is True, then alter table if the table column widths
+            # are wider than the existing table.
+            if alter_table:
+                self.alter_varchar_column_widths(tbl, table_name)
+
+            # Upload the table to S3
+            key = self.temp_s3_copy(tbl, aws_access_key_id=aws_access_key_id,
+                                    aws_secret_access_key=aws_secret_access_key)
+
+            try:
+                # Copy to Redshift database.
+                copy_args = {'max_errors': max_errors,
+                             'ignoreheader': 1,
+                             'statupdate': statupdate,
+                             'compupdate': compupdate,
+                             'acceptanydate': acceptanydate,
+                             'dateformat': dateformat,
+                             'timeformat': timeformat,
+                             'blanksasnull': blanksasnull,
+                             'nullas': nullas,
+                             'emptyasnull': emptyasnull,
+                             'acceptinvchars': acceptinvchars,
+                             'truncatecolumns': truncatecolumns,
+                             'specifycols': cols,
+                             'aws_access_key_id': aws_access_key_id,
+                             'aws_secret_access_key': aws_secret_access_key,
+                             'compression': 'gzip',
+                             'bucket_region': temp_bucket_region}
+
+                # Copy from S3 to Redshift
+                sql = self.copy_statement(table_name, self.s3_temp_bucket, key, **copy_args)
+                logger.debug(f'Copy SQL command: {sql}')
+                self.query_with_connection(sql, connection, commit=False)
+
+                logger.info(f'Data copied to {table_name}.')
+
+            # Clean up the S3 bucket.
+            finally:
+                if key and cleanup_s3_file:
+                    self.temp_s3_delete(key)
+
+    def unload(self, sql, bucket, key_prefix, manifest=True, header=True, delimiter='|',
+               compression='gzip', add_quotes=True, null_as=None, escape=True, allow_overwrite=True,
+               parallel=True, max_file_size='6.2 GB', aws_region=None, aws_access_key_id=None,
+               aws_secret_access_key=None):
+        """
+        Unload Redshift data to an S3 bucket. This is a more efficient method than running a query
+        to export data as it can export in parallel and directly into an S3 bucket. Consider
+        using this for exports of 10MM or more rows.
+
+        `Args:`
+            sql: str
+                The SQL string to execute to generate the data to unload.
+            bucket: str
+                The destination S3 bucket
+            key_prefix: str
+                The prefix of the key names that will be written
+            manifest: boolean
+                Creates a manifest file that explicitly lists details for the data files
+                that are created by the UNLOAD process.
+            header: boolean
+                Adds a header line containing column names at the top of each output file.
+            delimiter: str
+                Specifies the character used to separate fields. Defaults to '|'.
+            compression: str
+                One of ``gzip``, ``bzip2`` or ``None``. Unloads data to one or more compressed
+                files per slice. Each resulting file is appended with a ``.gz`` or ``.bz2`` extension.
+            add_quotes: boolean
+                Places quotation marks around each unloaded data field, so that Amazon Redshift
+                can unload data values that contain the delimiter itself.
+            null_as: str
+                Specifies a string that represents a null value in unload files. If this option is
+                not specified, null values are unloaded as zero-length strings for delimited output.
+            escape: boolean
+                For CHAR and VARCHAR columns in delimited unload files, an escape character (\) is
+                placed before every linefeed, carriage return, escape characters and delimiters.
+            allow_overwrite: boolean
+                If ``True``, will overwrite existing files, including the manifest file. If ``False``
+                will fail.
+            parallel: boolean
+                By default, UNLOAD writes data in parallel to multiple files, according to the number
+                of slices in the cluster. The default option is ON or TRUE. If PARALLEL is OFF or
+                FALSE, UNLOAD writes to one or more data files serially, sorted absolutely according
+                to the ORDER BY clause, if one is used.
+            max_file_size: str
+                The maximum size of files UNLOAD creates in Amazon S3. Specify a decimal value between
+                5 MB and 6.2 GB.
+            aws_region: str
+                The AWS Region where the target Amazon S3 bucket is located. REGION is required for
+                UNLOAD to an Amazon S3 bucket that is not in the same AWS Region as the Amazon Redshift
+                cluster.
+            aws_access_key_id:
+                An AWS access key granted to the bucket where the file is located. Not required
+                if keys are stored as environment variables.
+            aws_secret_access_key:
+                An AWS secret access key granted to the bucket where the file is located. Not
+                required if keys are stored as environment variables.
+        """  # NOQA W605
+
+        # The sql query is provided between single quotes, therefore single
+        # quotes within the actual query must be escaped.
+        # https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html#unload-parameters
+        sql = sql.replace("'", "''")
+
+        statement = f"""
+            UNLOAD ('{sql}') to 's3://{bucket}/{key_prefix}' \n
+            {self.get_creds(aws_access_key_id, aws_secret_access_key)} \n
+            PARALLEL {parallel} \n
+            MAXFILESIZE {max_file_size}
+        """
+        if manifest:
+            statement += "MANIFEST \n"
+        if header:
+            statement += "HEADER \n"
+        if delimiter:
+            statement += f"DELIMITER as '{delimiter}' \n"
+        if compression:
+            statement += f"{compression.upper()} \n"
+        if add_quotes:
+            statement += "ADDQUOTES \n"
+        if null_as:
+            statement += f"NULL {null_as} \n"
+        if escape:
+            statement += "ESCAPE \n"
+        if allow_overwrite:
+            statement += "ALLOWOVERWRITE \n"
+        if aws_region:
+            statement += f"REGION {aws_region} \n"
+
+        logger.info(f'Unloading data to s3://{bucket}/{key_prefix}')
+        logger.debug(statement)
+
+        return self.query(statement)
+
+    def generate_manifest(self, buckets, aws_access_key_id=None, aws_secret_access_key=None,
+                          mandatory=True, prefix=None, manifest_bucket=None, manifest_key=None,
+                          path=None):
+        """
+        Given a list of S3 buckets, generate a manifest file (JSON format). A manifest file
+        allows you to copy multiple files into a single table at once. Once the manifest is
+        generated, you can pass it with the :func:`~parsons.redshift.Redshift.copy_s3` method.
+
+        AWS keys are not required if the ``AWS_ACCESS_KEY_ID`` and
+        ``AWS_SECRET_ACCESS_KEY`` environment variables are set.
+
+        `Args:`
+
+            buckets: list or str
+                A list of buckets or single bucket from which to generate manifest
+            aws_access_key_id: str
+                AWS access key id to access S3 bucket
+            aws_secret_access_key: str
+                AWS secret access key to access S3 bucket
+            mandatory: boolean
+                The mandatory flag indicates whether the Redshift COPY should
+                terminate if the file does not exist.
+            prefix: str
+                Optional filter for key prefixes
+            manifest_bucket: str
+                Optional bucket to write manifest file.
+            manifest_key: str
+                Optional key name for S3 bucket to write file
+
+        `Returns:`
+            ``dict`` of manifest
+        """
+
+        from parsons.aws import S3
+        s3 = S3(aws_access_key_id=aws_access_key_id,
+                aws_secret_access_key=aws_secret_access_key)
+
+        # Deal with a single bucket being passed, rather than list.
+        if isinstance(buckets, str):
+            buckets = [buckets]
+
+        # Generate manifest file
+        manifest = {'entries': []}
+        for bucket in buckets:
+
+            # Retrieve list of files in bucket
+            key_list = s3.list_keys(bucket, prefix=prefix)
+            for key in key_list:
+                manifest['entries'].append({
+                    'url': '/'.join(['s3:/', bucket, key]),
+                    'mandatory': mandatory
+                })
+
+        logger.info('Manifest generated.')
+
+        # Save the file to s3 bucket if provided
+        if manifest_key and manifest_bucket:
+            # Dump the manifest to a temp JSON file
+            manifest_path = files.create_temp_file()
+            with open(manifest_path, 'w') as manifest_file_obj:
+                json.dump(manifest, manifest_file_obj, sort_keys=True, indent=4)
+
+            # Upload the file to S3
+            s3.put_file(manifest_bucket, manifest_key, manifest_path)
+
+            logger.info(f'Manifest saved to s3://{manifest_bucket}/{manifest_key}')
+
+        return manifest
+
+    def upsert(self, table_obj, target_table, primary_key, vacuum=True, distinct_check=True,
+               cleanup_temp_table=True, alter_table=True, **copy_args):
+        """
+        Perform an upsert on an existing table. An upsert is a function in which records
+        in a table are updated and inserted at the same time. Unlike other SQL databases,
+        it does not exist natively in Redshift.
+
+        `Args:`
+            table_obj: obj
+                A Parsons table object
+            target_table: str
+                The schema and table name to upsert
+            primary_key: str or list
+                The primary key column(s) of the target table
+            vacuum: boolean
+                Re-sorts rows and reclaims space in the specified table. You must be a table owner
+                or super user to effectively vacuum a table, however the method will not fail
+                if you lack these privileges.
+            distinct_check: boolean
+                Check if the primary key column is distinct. Raise error if not.
+            cleanup_temp_table: boolean
+                A temp table is dropped by default on cleanup. You can set to False for debugging.
+            alter_table: boolean
+                Widen the target table's varchar columns, if necessary, to fit the incoming data.
+            \**copy_args: kwargs
+                See :func:`~parsons.databases.Redshift.copy` for options.
+        """  # noqa: W605
+
+        if not self.table_exists(target_table):
+            logger.info('Target table does not exist. Copying into newly '
+                        'created target table.')
+            self.copy(table_obj, target_table)
+            return None
+
+        if alter_table:
+            # Make target table column widths match incoming table, if necessary
+            self.alter_varchar_column_widths(table_obj, target_table)
+
+        noise = f'{random.randrange(0, 10000):04}'[:4]
+        date_stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M')
+        # Generate a temp table like "table_tmp_20200210_1230_14212"
+        staging_tbl = '{}_stg_{}_{}'.format(target_table, date_stamp, noise)
+
+        if isinstance(primary_key, str):
+            primary_keys = [primary_key]
+        else:
+            primary_keys = primary_key
+
+        if distinct_check:
+            primary_keys_statement = ', '.join(primary_keys)
+            diff = self.query(f'''
+                select (
+                    select count(*)
+                    from {target_table}
+                ) - (
+                    SELECT COUNT(*) from (
+                        select distinct {primary_keys_statement}
+                        from {target_table}
+                    )
+                ) as total_count
+            ''').first
+            if diff > 0:
+                raise ValueError('Primary key column contains duplicate values.')
+
+        with self.connection() as connection:
+
+            try:
+                # Copy to a staging table
+                logger.info(f'Building staging table: {staging_tbl}')
+                if 'compupdate' not in copy_args:
+                    # Especially with a lot of columns, compupdate=True can
+                    # cause a lot of processing/analysis by Redshift before upload.
+                    # Since this is a temporary table, setting compression for each
+                    # column is barely impactful.
+                    # https://docs.aws.amazon.com/redshift/latest/dg/c_Loading_tables_auto_compress.html
+                    copy_args = dict(copy_args, compupdate=False)
+                self.copy(table_obj, staging_tbl,
+                          template_table=target_table,
+                          alter_table=False,  # We just did our own alter table above
+                          **copy_args)
+
+                staging_table_name = staging_tbl.split('.')[1]
+                target_table_name = target_table.split('.')[1]
+
+                # Delete rows
+                comparisons = [
+                    f'{staging_table_name}.{primary_key} = {target_table_name}.{primary_key}'
+                    for primary_key in primary_keys
+                ]
+                where_clause = ' and '.join(comparisons)
+
+                sql = f"""
+                    DELETE FROM {target_table}
+                    USING {staging_tbl}
+                    WHERE {where_clause}
+                """
+                self.query_with_connection(sql, connection, commit=False)
+                logger.debug(f'Target rows deleted from {target_table}.')
+
+                # Insert rows
+                # ALTER TABLE APPEND would be more efficient, but you can't run it in a
+                # transaction block. It's worth the performance hit to not commit until the
+                # end.
+                sql = f"""
+                    INSERT INTO {target_table}
+                    SELECT * FROM {staging_tbl};
+                """
+
+                self.query_with_connection(sql, connection, commit=False)
+                logger.info(f'Target rows inserted to {target_table}')
+
+            finally:
+                if cleanup_temp_table:
+                    # Drop the staging table
+                    self.query_with_connection(f"DROP TABLE IF EXISTS {staging_tbl};",
+                                               connection, commit=False)
+                    logger.info(f'{staging_tbl} staging table dropped.')
+
+        # Vacuum table. You must commit when running this type of transaction.
+        if vacuum:
+            with self.connection() as connection:
+                connection.set_session(autocommit=True)
+                self.query_with_connection(f'VACUUM {target_table};', connection)
+                logger.info(f'{target_table} vacuumed.')
+
+    def alter_varchar_column_widths(self, tbl, table_name):
+        """
+        Alter the widths of varchar columns in a Redshift table to match the widths
+        of a Parsons table. The columns are matched by column name and not their
+        index.
+
+        `Args:`
+            tbl: obj
+                A Parsons table
+            table_name: str
+                The target table name (e.g. ``my_schema.my_table``)
+
+        `Returns:`
+            ``None``
+        """
+
+        # Make the Parsons table column names match valid Redshift names
+        tbl.table = petl.setheader(tbl.table, self.column_name_validate(tbl.columns))
+
+        # Create a list of column names and max width for string values.
+        pc = {c: tbl.get_column_max_width(c) for c in tbl.columns}
+
+        # Determine the max width of the varchar columns in the Redshift table
+        s, t = self.split_full_table_name(table_name)
+        cols = self.get_columns(s, t)
+        rc = {k: v['max_length'] for k, v in cols.items()
+              if v['data_type'] == 'character varying'}
+
+        # Figure out if any of the destination table varchar columns are smaller than the
+        # associated Parsons table columns. If they are, then alter column types to expand
+        # their width.
+        for c in set(rc.keys()).intersection(set(pc.keys())):
+            if rc[c] < pc[c]:
+                logger.info(f'{c} not wide enough. Expanding column width.')
+                self.alter_table_column_type(table_name, c, 'varchar', varchar_width=pc[c])
+
+    def alter_table_column_type(self, table_name, column_name, data_type, varchar_width=None):
+        """
+        Alter a column type of an existing table.
+
+        `Args:`
+            table_name: str
+                The table name (ex. ``my_schema.my_table``).
+            column_name: str
+                The target column name
+            data_type: str
+                A valid Redshift data type to alter the table to.
+            varchar_width:
+                The new width of the column if of type varchar.
+        """
+
+        sql = f"ALTER TABLE {table_name} ALTER COLUMN {column_name} TYPE {data_type}"
+
+        if varchar_width:
+            sql += f"({varchar_width})"
+
+        with self.connection() as connection:
+            connection.set_session(autocommit=True)
+            self.query_with_connection(sql, connection)
+            logger.info(f'Altered {table_name} {column_name}.')
+
+    def table(self, table_name):
+        # Return a Redshift table object
+
+        return RedshiftTable(self, table_name)
+
+
+class RedshiftTable(BaseTable):
+    # Redshift table object.
+
+    pass
+
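To tie the pieces above together, an end-to-end sketch (the schema/table names and data are hypothetical; assumes the ``REDSHIFT_*`` and ``S3_TEMP_BUCKET`` environment variables are set and that ``Redshift`` is exported at the package root):

.. code-block:: python

    from parsons import Redshift, Table

    rs = Redshift()

    tbl = Table([{'voter_id': 1, 'name': "Beatrice O'Brady"}])

    # Bulk-load through the temp S3 bucket, replacing any existing table
    rs.copy(tbl, 'myschema.voters', if_exists='drop')

    # Later, update-or-insert rows keyed on voter_id
    updates = Table([{'voter_id': 1, 'name': 'Beatrice Smith'}])
    rs.upsert(updates, 'myschema.voters', primary_key='voter_id')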
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/databases/redshift/rs_queries.html b/docs/html/_modules/parsons/databases/redshift/rs_queries.html
new file mode 100644
index 0000000000..013a79dd65
--- /dev/null
+++ b/docs/html/_modules/parsons/databases/redshift/rs_queries.html
@@ -0,0 +1,639 @@
+parsons.databases.redshift.rs_queries — Parsons 0.1 documentation

Source code for parsons.databases.redshift.rs_queries

+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class RedshiftQueries(object):
+
+    def __init__(self):
+
+        pass
+
+    def rename_table(self, table_name, new_table_name):
+        """
+        Rename an existing table.
+
+        .. note::
+            You cannot move schemas when renaming a table. Instead, utilize
+            the :meth:`parsons.Redshift.duplicate_table` method.
+
+        Args:
+            table_name: str
+                Name of existing schema and table (e.g. ``myschema.oldtable``)
+            new_table_name: str
+                New name for table. Note: Omit schema in table name.
+
+        """
+
+        sql = f"alter table {table_name} rename to {new_table_name}"
+        self.query(sql)
+        logger.info(f"{table_name} renamed to {new_table_name}")
+
+    def move_table(self, source_table, new_table, drop_source_table=False):
+        """
+        Move an existing table in the database.
+
+        It will inherit encoding, sortkey and distkey. **Once run, the source table
+        rows will be empty.** This is more efficient than running
+        ``"create newtable as select * from oldtable"``.
+
+        For more information see: `ALTER TABLE APPEND <https://docs.aws.amazon.com/redshift/latest/dg/r_ALTER_TABLE_APPEND.html>`_
+
+        Args:
+            source_table: str
+                Name of existing schema and table (e.g. ``myschema.oldtable``)
+            new_table: str
+                New name of schema and table (e.g. ``myschema.newtable``)
+            drop_source_table: boolean
+                Drop the source table.
+        Returns:
+                None
+        """ # noqa: E501,E261
+
+        # To Do: Add the grants
+        # To Do: Argument for if the table exists?
+        # To Do: Add the ignore extra kwarg.
+
+        create_sql = f"create table {new_table} (like {source_table});"
+        alter_sql = f"alter table {new_table} append from {source_table}"
+
+        logger.info(f'Creating empty {new_table} from {source_table}.')
+        self.query(create_sql)
+
+        with self.connection() as conn:
+
+            #  An ALTER TABLE statement can't be run within a block, meaning
+            #  that it needs to be committed on running. To enable this,
+            #  the connection must be set to autocommit.
+
+            conn.set_session(autocommit=True)
+            logger.info(f'Moving data from {source_table} to {new_table}.')
+            self.query_with_connection(alter_sql, conn)
+
+        if drop_source_table:
+            self.query(f"drop table {source_table};")
+            logger.info(f'{source_table} dropped.')
+
+        logger.info(f'Data moved from {source_table} to {new_table}.')
+
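For illustration, a hypothetical call (not part of the module source; ``rs`` stands in for a connected instance that mixes in these methods, e.g. ``parsons.Redshift``):

.. code-block:: python

    # Appends oldtable's rows into a fresh newtable (leaving oldtable empty),
    # then drops the emptied source table
    rs.move_table('myschema.oldtable', 'myschema.newtable', drop_source_table=True)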
+    def _create_table_precheck(self, connection, table_name, if_exists):
+        """
+        Helper to determine what to do when you need a table that may already exist.
+
+        `Args:`
+            connection: obj
+                A connection object obtained from ``redshift.connection()``
+            table_name: str
+                The table to check
+            if_exists: str
+                If the table already exists, either ``fail``, ``append``, ``drop``,
+                or ``truncate`` the table.
+        `Returns:`
+            bool
+                True if the table needs to be created, False otherwise.
+        """
+
+        if if_exists not in ['fail', 'truncate', 'append', 'drop']:
+            raise ValueError("Invalid value for `if_exists` argument")
+
+        exists = self.table_exists_with_connection(table_name, connection)
+
+        if exists and if_exists in ['fail', 'truncate', 'append']:
+            if if_exists == 'fail':
+                raise ValueError('Table already exists.')
+            elif if_exists == 'truncate':
+                truncate_sql = f"truncate table {table_name}"
+                self.query_with_connection(truncate_sql, connection, commit=False)
+
+        else:
+            if exists and if_exists == 'drop':
+                logger.info(f"Table {table_name} exist, will drop...")
+                drop_sql = f"drop table {table_name};\n"
+                self.query_with_connection(drop_sql, connection, commit=False)
+
+            return True
+
+        return False
+
+    def populate_table_from_query(self, query, destination_table, if_exists='fail', distkey=None,
+                                  sortkey=None):
+        """
+        Populate a Redshift table with the results of a SQL query, creating the table if it
+        doesn't yet exist.
+
+        `Args:`
+            query: str
+                The SQL query
+            destination_table: str
+                Name of destination schema and table (e.g. ``myschema.newtable``)
+            if_exists: str
+                If the table already exists, either ``fail``, ``append``, ``drop``,
+                or ``truncate`` the table.
+            distkey: str
+                The column to use as the distkey for the table.
+            sortkey: str
+                The column to use as the sortkey for the table.
+        """
+        with self.connection() as conn:
+            should_create = self._create_table_precheck(conn, destination_table, if_exists)
+
+            if should_create:
+                logger.info(f"Creating table {destination_table} from query...")
+                sql = f"create table {destination_table}"
+                if distkey:
+                    sql += f" distkey({distkey})"
+                if sortkey:
+                    sql += f" sortkey({sortkey})"
+                sql += f" as {query}"
+            else:
+                logger.info(f"Inserting data into {destination_table} from query...")
+                sql = f"insert into {destination_table} ({query})"
+
+            self.query_with_connection(sql, conn, commit=False)
+
+        logger.info(f'{destination_table} created from query')
+
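A hypothetical call for reference (schema/table names invented; ``rs`` is assumed to be a connected instance):

.. code-block:: python

    rs.populate_table_from_query(
        'select state, count(*) as n from myschema.voters group by 1',
        'myschema.voters_by_state',
        if_exists='truncate',
        sortkey='state',
    )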
+    def duplicate_table(self, source_table, destination_table, where_clause='',
+                        if_exists='fail', drop_source_table=False):
+        """
+        Create a copy of an existing table (or subset of rows) in a new
+        table. It will inherit encoding, sortkey and distkey.
+
+        `Args:`
+            source_table: str
+                Name of existing schema and table (e.g. ``myschema.oldtable``)
+            destination_table: str
+                Name of destination schema and table (e.g. ``myschema.newtable``)
+            where_clause: str
+                An optional where clause (e.g. ``where org = 1``).
+            if_exists: str
+                If the table already exists, either ``fail``, ``append``, ``drop``,
+                or ``truncate`` the table.
+            drop_source_table: boolean
+                Drop the source table
+        """
+
+        with self.connection() as conn:
+            should_create = self._create_table_precheck(conn, destination_table, if_exists)
+
+            if should_create:
+                logger.info(f'Creating {destination_table} from {source_table}...')
+                create_sql = f"create table {destination_table} (like {source_table})"
+                self.query_with_connection(create_sql, conn, commit=False)
+
+            logger.info(f"Transferring data to {destination_table} from {source_table}")
+            select_sql = f"select * from {source_table} {where_clause}"
+            insert_sql = f"insert into {destination_table} ({select_sql})"
+            self.query_with_connection(insert_sql, conn, commit=False)
+
+            if drop_source_table:
+                logger.info(f'Dropping table {source_table}...')
+                drop_sql = f"drop table {source_table}"
+                self.query_with_connection(drop_sql, conn, commit=False)
+
+        logger.info(f'{destination_table} created from {source_table}.')
+
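For example, copying a filtered subset into a new table might look like this (names and filter invented):

.. code-block:: python

    rs.duplicate_table('myschema.voters', 'myschema.voters_tx',
                       where_clause="where state = 'TX'",
                       if_exists='drop')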
+    def union_tables(self, new_table_name, tables, union_all=True, view=False):
+        """
+        Union a series of table into a new table.
+
+        Args:
+            new_table_name: str
+                The new table and schema (e.g. ``myschema.newtable``)
+            tables: list
+                A list of tables to union
+            union_all: boolean
+                If ``False`` will dedupe rows
+            view: boolean
+                Create a view rather than a static table
+        Returns:
+            None
+        """
+
+        union_type = " UNION ALL" if union_all else " UNION"
+        table_type = "VIEW" if view else "TABLE"
+
+        sql = f"CREATE {table_type} {new_table_name} AS"
+        for index, t in enumerate(tables):
+            if index != 0:
+                sql += union_type
+            sql += f" SELECT * FROM {t}"
+
+        self.query(sql)
+
+        logger.info(f"Created {new_table_name} from {', '.join(tables)}")
+
+    def get_tables(self, schema=None, table_name=None):
+        """
+        List the tables in a schema, including metadata.
+
+        `Args:`
+            schema: str
+                Filter by a schema
+            table_name: str
+                Filter by a table name
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        logger.info("Retrieving table information.")
+        sql = "select * from pg_tables"
+        if schema or table_name:
+            sql += " where"
+        if schema:
+            sql += f" schemaname = '{schema}'"
+        if table_name:
+            if schema:
+                sql += " and"
+            sql += f" tablename = '{table_name}'"
+        return self.query(sql)
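Since ``pg_tables`` backs this method, the returned Parsons Table exposes columns such as ``schemaname`` and ``tablename``; for example:

.. code-block:: python

    for row in rs.get_tables(schema="myschema"):
        print(row["tablename"])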
+
+    def get_table_stats(self, schema=None, table_name=None):
+        """
+        List table statistics, including row count and size.
+
+        .. warning::
+           This method is only accessible by Redshift *superusers*.
+
+        `Args:`
+            schema: str
+                Filter by a schema
+            table_name: str
+                Filter by a table name
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        logger.info(f"Retrieving table statistics.")
+        sql = "select * from svv_table_info"
+        if schema or table_name:
+            sql += f" where"
+        if schema:
+            sql += f" schema = '{schema}'"
+        if table_name:
+            if schema:
+                sql += " and "
+            sql += f" \"table\" = '{table_name}'"
+        return self.query(sql)
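A sketch of checking table sizes (``svv_table_info`` reports ``size`` in 1 MB blocks and row counts as ``tbl_rows``):

.. code-block:: python

    for row in rs.get_table_stats(schema="myschema"):
        print(row["table"], row["size"], row["tbl_rows"])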
+
+    def get_columns(self, schema, table_name):
+        """
+        Gets the column names (and some other column info) for a table.
+
+        If you just need the column names, you can treat the return value like a list, e.g.:
+
+        .. code-block:: python
+
+            for col in rs.get_columns('some_schema', 'some_table'):
+                print(col)
+
+        `Args:`
+            schema: str
+                The schema name
+            table_name: str
+                The table name
+        `Returns:`
+            A dict mapping column name to a dict with extra info. The keys of the dict are ordered
+            just like the columns in the table. The extra info is a dict with format {
+              'data_type': str,
+              'max_length': int or None,
+              'is_nullable': bool,
+            }
+        """
+
+        query = f"""
+            select ordinal_position,
+                   column_name,
+                   data_type,
+                   case when character_maximum_length is not null
+                        then character_maximum_length
+                        else numeric_precision end as max_length,
+                   is_nullable
+            from information_schema.columns
+            where table_name = '{table_name}'
+            and table_schema = '{schema}'
+            order by ordinal_position
+        """
+
+        return {
+            row['column_name']: {
+                'data_type': row['data_type'],
+                'max_length': row['max_length'],
+                'is_nullable': row['is_nullable'] == 'YES',
+            }
+            for row in self.query(query)
+        }
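For example, because the returned dict preserves column order, it can serve both as a column list and for type checks (names hypothetical):

.. code-block:: python

    cols = rs.get_columns("myschema", "voters")

    print(list(cols))  # column names, in table order

    # Find the columns that allow NULLs
    nullable = [name for name, info in cols.items() if info["is_nullable"]]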
+
+    def get_views(self, schema=None, view=None):
+        """
+        List views.
+
+        `Args:`
+            schema: str
+                Filter by a schema
+            view: str
+                Filter by a view name
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        logger.info("Retrieving view information.")
+        sql = """
+              select table_schema as schema_name,
+              table_name as view_name,
+              view_definition
+              from information_schema.views
+              where table_schema not in ('information_schema', 'pg_catalog')
+              """
+        if schema:
+            sql += f" and table_schema = '{schema}'"
+        if view:
+            sql += f" and table_name = '{view}'"
+        return self.query(sql)
+
+    def get_queries(self):
+        """
+        Return the queries currently running or queued, along with their resource consumption.
+
+        .. warning::
+            Must be a Redshift superuser to run this method.
+
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        logger.info('Retrieving running and queued queries.')
+
+        # Lifted from Redshift Utils https://github.com/awslabs/amazon-redshift-utils/blob/master/src/AdminScripts/running_queues.sql # noqa: E501
+        sql = """
+              select trim(u.usename) as user,
+                s.pid,
+                q.xid,
+                q.query,
+                q.service_class as service_class,
+                q.slot_count as slot,
+                date_trunc('second',
+                q.wlm_start_time) as start,
+                decode(trim(q.state),
+                    'Running',
+                    'Run',
+                    'QueuedWaiting',
+                    'Queue',
+                    'Returning',
+                    'Return',trim(q.state)) as state,
+                q.queue_Time/1000000 as queue_sec,
+                q.exec_time/1000000 as exec_sec,
+                m.cpu_time/1000000 cpu_sec,
+                m.blocks_read read_mb,
+                decode(m.blocks_to_disk,-1,null,m.blocks_to_disk) spill_mb,
+                m2.rows as return_rows,
+                m3.rows as NL_rows,
+                substring(replace(nvl(qrytext_cur.text,trim(translate(s.text,chr(10)||chr(13)||chr(9) ,''))),'\\n',' '),1,90) as sql, -- # noqa: E501
+                trim(decode(event&1,1,'SK ','') || decode(event&2,2,'Del ','') || decode(event&4,4,'NL ','') ||  decode(event&8,8,'Dist ','') || decode(event&16,16,'Bcast ','') || decode(event&32,32,'Stats ','')) as Alert -- # noqa: E501
+            from stv_wlm_query_state q
+            left outer join stl_querytext s on (s.query=q.query and sequence = 0)
+            left outer join stv_query_metrics m on ( q.query = m.query and m.segment=-1 and m.step=-1 )
+            left outer join stv_query_metrics m2 on ( q.query = m2.query and m2.step_type = 38 )
+            left outer join ( select query, sum(rows) as rows from stv_query_metrics m3 where step_type = 15 group by 1) as m3 on ( q.query = m3.query ) -- # noqa: E501
+            left outer join pg_user u on ( s.userid = u.usesysid )
+            LEFT OUTER JOIN (SELECT ut.xid,'CURSOR ' || TRIM( substring ( TEXT from strpos(upper(TEXT),'SELECT') )) as TEXT
+            FROM stl_utilitytext ut
+            WHERE sequence = 0
+               AND upper(TEXT) like 'DECLARE%'
+               GROUP BY text, ut.xid) qrytext_cur ON (q.xid = qrytext_cur.xid)
+            left outer join ( select query,sum(decode(trim(split_part(event,':',1)),'Very selective query filter',1,'Scanned a large number of deleted rows',2,'Nested Loop Join in the query plan',4,'Distributed a large number of rows across the network',8,'Broadcasted a large number of rows across the network',16,'Missing query planner statistics',32,0)) as event from STL_ALERT_EVENT_LOG -- # noqa: E501
+            where event_time >=  dateadd(hour, -8, current_Date) group by query  ) as alrt on alrt.query = q.query -- # noqa: E501
+            """
+
+        return self.query(sql)
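A monitoring sketch built on the columns the query above aliases (``pid``, ``user``, ``state``, ``exec_sec``, ...):

.. code-block:: python

    for row in rs.get_queries():
        print(row["pid"], row["user"], row["state"], row["exec_sec"])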
+
+    def get_max_date(self, table_name, date_column):
+        """
+        Return the max date from a table.
+
+        `Args:`
+            table_name: str
+                Schema and table name
+            date_column: str
+                The column containing the date
+        """
+
+        return self.query(f'SELECT MAX({date_column}) date from {table_name}')[0]['date']
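A common use is incremental loading: fetch only the rows newer than what has already landed (table and column names hypothetical):

.. code-block:: python

    max_date = rs.get_max_date("myschema.events", "created_at")
    new_rows = rs.query(
        f"select * from myschema.events_staging where created_at > '{max_date}'")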
+
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/databases/redshift/rs_schema.html b/docs/html/_modules/parsons/databases/redshift/rs_schema.html
new file mode 100644
index 0000000000..8c27b3b4db
--- /dev/null
+++ b/docs/html/_modules/parsons/databases/redshift/rs_schema.html
@@ -0,0 +1,290 @@
+ parsons.databases.redshift.rs_schema — Parsons 0.5 documentation
Source code for parsons.databases.redshift.rs_schema

+
+class RedshiftSchema(object):
+
+    def schema_exists(self, schema):
+        sql = f"select * from pg_namespace where nspname = '{schema}'"
+        res = self.query(sql)
+        return res.num_rows > 0
+
+    def create_schema_with_permissions(self, schema, group=None):
+        """
+        Creates a Redshift schema (if it doesn't already exist), and grants usage permissions to
+        a Redshift group (if specified).
+
+        `Args:`
+            schema: str
+                The schema name
+            group: str
+                The Redshift group name
+        """
+
+        if not self.schema_exists(schema):
+            self.query(f"create schema {schema}")
+        if group:
+            self.query(f"grant usage on schema {schema} to group {group}")
+
+    def grant_schema_permissions(self, schema, group, permissions_type='select'):
+        """
+        Grants a Redshift group permissions to all tables within an existing schema.
+
+        `Args:`
+            schema: str
+                The schema name
+            group: str
+                The Redshift group name
+            permissions_type: str
+                The type of permissions to grant. Supports `select`, `all`, etc. (For
+                full list, see the
+                `Redshift GRANT docs <https://docs.aws.amazon.com/redshift/latest/dg/r_GRANT.html>`_)
+        """ # noqa: E501,E261
+
+        sql = f"""
+        grant usage on schema {schema} to group {group};
+        grant {permissions_type} on all tables in schema {schema} to group {group};
+        """
+        self.query(sql)
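In the published package these schema helpers are mixed into the same ``Redshift`` connector class; assuming that, a setup sketch looks like (schema and group names hypothetical):

.. code-block:: python

    rs.create_schema_with_permissions("analytics", group="analysts")
    rs.grant_schema_permissions("analytics", "analysts", permissions_type="select")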
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/databases/sqldb.html b/docs/html/_modules/parsons/databases/sqldb.html
new file mode 100644
index 0000000000..7b8d0ddd2b
--- /dev/null
+++ b/docs/html/_modules/parsons/databases/sqldb.html
@@ -0,0 +1,235 @@
+ parsons.databases.sqldb — Parsons 0.1 documentation

Source code for parsons.databases.sqldb

+import petl
+
+
+class SQLDatabase(object):
+
+    def __init__(self):
+
+        pass
+
+    def download(self, sql):
+        """
+        Execute a SQL statement and return the results, if any.
+
+        `Args:`
+            sql: str
+                A SQL statement
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        return petl.fromdb(self.conn, sql)
+
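``SQLDatabase`` assumes a subclass (or caller) provides ``self.conn`` as a DB-API connection for ``petl.fromdb``. A hypothetical sketch using sqlite3:

.. code-block:: python

    import sqlite3

    class SQLiteDatabase(SQLDatabase):

        def __init__(self, path):
            self.conn = sqlite3.connect(path)

    db = SQLiteDatabase("local.db")
    tbl = db.download("select * from people")  # as written, a lazy petl table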
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/etl/etl.html b/docs/html/_modules/parsons/etl/etl.html
new file mode 100644
index 0000000000..dc052c1e6e
--- /dev/null
+++ b/docs/html/_modules/parsons/etl/etl.html
@@ -0,0 +1,1240 @@
+ parsons.etl.etl — Parsons 0.5 documentation

Source code for parsons.etl.etl

+import petl
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class ETL(object): + + def __init__(self): + + pass + +
[docs] def add_column(self, column, value=None, index=None): + """ + Add a column to your table + + `Args:` + column: str + Name of column to add + value: + A fixed or calculated value + index: int + The position of the new column in the table + `Returns:` + `Parsons Table` and also updates self + """ + + if column in self.columns: + raise ValueError(f"Column {column} already exists") + + self.table = self.table.addfield(column, value, index) + + return self
+ +
[docs] def remove_column(self, *columns): + """ + Remove a column from your table + + `Args:` + \*columns: str + Column names + `Returns:` + `Parsons Table` and also updates self + """ # noqa: W605 + + self.table = petl.cutout(self.table, *columns) + + return self
+ +
[docs] def rename_column(self, column_name, new_column_name): + """ + Rename a column + + `Args:` + column_name: str + The current column name + new_column_name: str + The new column name + `Returns:` + `Parsons Table` and also updates self + """ + + if new_column_name in self.columns: + raise ValueError(f"Column {new_column_name} already exists") + + self.table = petl.rename(self.table, column_name, new_column_name) + + return self
+ +
[docs] def fill_column(self, column_name, fill_value): + """ + Fill a column in a table + + `Args:` + column_name: str + The column to fill + fill_value: + A fixed or calculated value + `Returns:` + `Parsons Table` and also updates self + """ + + self.add_column(column_name + '_column_fill_temp', fill_value) + self.remove_column(column_name) + self.rename_column(column_name + '_column_fill_temp', column_name) + + return self
+ +
[docs] def fillna_column(self, column_name, fill_value): + """ + Fill None values in a column in a table + + `Args:` + column_name: str + The column to fill + fill_value: + Fixed value only + `Returns:` + `Parsons Table` and also updates self + """ + + self.fill_column(column_name, lambda x: x[column_name] if x[column_name] else fill_value) + + return self
+ +
[docs] def move_column(self, column, index): + """ + Move a column + + `Args:` + column: str + The column name to move + index: + The new index for the column + `Returns:` + `Parsons Table` and also updates existing object. + """ + + self.table = petl.movefield(self.table, column, index) + + return self
+ +
[docs] def convert_column(self, *column, **kwargs): + """ + Transform values under one or more fields via arbitrary functions, method + invocations or dictionary translations. This leverages the petl ``convert()`` + method. Example usage can be found `here <https://petl.readthedocs.io/en/v0.24/transform.html#petl.convert>`_. + + `Args:` + \*column: str + A single column or multiple columns passed as a list + \**kwargs: str, method or variable + The update function, method, or variable to process the update + `Returns:` + `Parsons Table` and also updates self + """ # noqa: E501,E261 + + self.table = petl.convert(self.table, *column, **kwargs) + + return self
+ +
[docs] def get_column_max_width(self, column): + """ + Return the maximum width of the column. + + `Args:` + column: str + The column name. + `Returns:` + int + """ + + max_width = 0 + + for v in petl.values(self.table, column): + + if len(str(v).encode('utf-8')) > max_width: + max_width = len(str(v).encode('utf-8')) + + return max_width
+ +
[docs] def convert_columns_to_str(self): + """ + Convenience function to convert all non-string or mixed columns in a + Parsons table to string (e.g. for comparison) + + `Returns:` + `Parsons Table` and also updates self + """ + + # If we don't have any rows, don't bother trying to convert things + if self.num_rows == 0: + return self + + cols = self.get_columns_type_stats() + + for col in cols: + # If there's more than one type (or no types), convert to str + # Also if there is one type and it's not str, convert to str + if len(col['type']) != 1 or col['type'][0] != 'str': + self.convert_column(col['name'], str) + + return self
+ +
[docs] def coalesce_columns(self, dest_column, source_columns, remove_source_columns=True): + """ + Coalesces values from one or more source columns into a destination column, by selecting + the first non-empty value. If the destination column doesn't exist, it will be added. + + `Args:` + dest_column: str + Name of destination column + source_columns: list + List of source column names + remove_source_columns: bool + Whether to remove the source columns after the coalesce. If the destination + column is also one of the source columns, it will not be removed. + `Returns:` + `Parsons Table` and also updates self + """ + + if dest_column in self.columns: + def convert_fn(value, row): + for source_col in source_columns: + if row.get(source_col): + return row[source_col] + + logger.debug(f"Coalescing {source_columns} into {dest_column}") + self.convert_column(dest_column, convert_fn, pass_row=True) + + else: + def add_fn(row): + for source_col in source_columns: + if row.get(source_col): + return row[source_col] + + logger.debug(f"Creating new column {dest_column} from {source_columns}") + self.add_column(dest_column, add_fn) + + if remove_source_columns: + for source_col in source_columns: + if source_col != dest_column: + self.remove_column(source_col) + + return self
+ +
[docs] def map_columns(self, column_map): + """ + Standardizes column names based on multiple possible values. This method + is helpful when your input table might have multiple and unknown column + names. + + `Args:` + column_map: dict + A dictionary of columns and possible values that map to it + `Returns:` + `Parsons Table` and also updates self + + .. code-block:: python + + tbl = [{fn: 'Jane'}, + {lastname: 'Doe'}, + {dob: '1980-01-01'}] + column_map = {first_name: ['fn', 'first', 'firstname'], + last_name: ['ln', 'last', 'lastname'], + date_of_birth: ['dob', 'birthday']} + tbl.map_columns(column_map) + print (tbl) + >> {{first_name: 'Jane', last_name: 'Doe', 'date_of_birth': '1908-01-01'}} + """ + + for c in self.columns: + for k, v in column_map.items(): + for i in v: + if c == i: + self.rename_column(c, k) + + return self
+ +
[docs] def map_and_coalesce_columns(self, column_map): + """ + Coalesces columns based on multiple possible values. The columns in the map + do not need to be in your table, so you can create a map with all possibilities. + The coalesce will occur in the order that the columns are listed, unless the + destination column name already exists in the table, in which case that + value will be preferenced. This method is helpful when your input table might + have multiple and unknown column names. + `Args:` + column_map: dict + A dictionary of columns and possible values that map to it + + `Returns:` + `Parsons Table` and also updates self + + .. code-block:: python + + tbl = [{first: None}, + {fn: 'Jane'}, + {lastname: 'Doe'}, + {dob: '1980-01-01'}] + + column_map = {first_name: ['fn', 'first', 'firstname'], + last_name: ['ln', 'last', 'lastname'], + date_of_birth: ['dob', 'birthday']} + + tbl.map_and_coalesce_columns(column_map) + + print (tbl) + >> {{first_name: 'Jane', last_name: 'Doe', 'date_of_birth': '1908-01-01'}} + """ + + for key, value in column_map.items(): + coalesce_list = value + # if the column in the mapping dict isn't actually in the table, + # remove it from the list of columns to coalesce + for item in coalesce_list: + if item not in self.columns: + coalesce_list.remove(item) + # if the key from the mapping dict already exists in the table, + # rename it so it can be coalesced with other possible columns + if key in self.columns: + self.rename_column(key, f'{key}_temp') + coalesce_list.insert(0, f'{key}_temp') + + # coalesce columns + self.coalesce_columns(key, coalesce_list, remove_source_columns=True) + + return self
+ +
[docs] def get_column_types(self, column): + """ + Return all of the Python types for values in a given column + + `Args:` + column: str + Name of the column to analyze + `Returns:` + list + A list of Python types + """ + + return list(petl.typeset(self.table, column))
+ +
[docs] def get_columns_type_stats(self): + """ + Return descriptive stats for all columns + + `Returns:` + list + A list of dicts + `Returns:` + list + A list of dicts, each containing a column 'name' and a 'type' list + """ + + return [{'name': col, 'type': self.get_column_types(col)} + for col in self.table.columns()]
+ +
+    def convert_table(self, *args):
+        """
+        Transform all cells in a table via arbitrary functions, method invocations or dictionary
+        translations. This method is useful for cleaning fields and data hygiene functions such
+        as regex. This method leverages the petl ``convert()`` method. Example usage can be
+        found `here <https://petl.readthedocs.io/en/v0.24/transform.html#petl.convert>`_.
+
+        `Args:`
+            \*args: str, method or variable
+                The update function, method, or variable to process the update.
+        `Returns:`
+            `Parsons Table` and also updates self
+        """ # noqa: W605
+
+        self.convert_column(self.columns, *args)
+
+        return self
+ +
[docs] def unpack_dict(self, column, keys=None, include_original=False, + sample_size=1000, missing=None, prepend=True, + prepend_value=None): + """ + Unpack dictionary values from one column into separate columns + + `Args:` + column: str + The column name to unpack + keys: list + The dict keys in the column to unpack. If ``None`` will unpack + all. + include_original: boolean + Retain original column after unpacking + sample_size: int + Number of rows to sample before determining columns + missing: str + If a value is missing, the value to fill it with + prepend: + Prepend the column name of the unpacked values. Useful for + avoiding duplicate column names + prepend_value: + Value to prepend new columns if ``prepend=True``. If None, will + set to column name. + """ + + if prepend: + if prepend_value is None: + prepend_value = column + + self.table = petl.convert( + self.table, + column, + lambda v: self._prepend_dict(v, prepend_value)) + + self.table = petl.unpackdict( + self.table, column, keys=keys, includeoriginal=include_original, + samplesize=sample_size, missing=missing) + + return self
+ +
[docs] def unpack_list(self, column, include_original=False, missing=None, replace=False, + max_columns=None): + """ + Unpack list values from one column into separate columns. Numbers the + columns. + + .. code-block:: python + + # Begin with a list in column + json = [{'id': '5421', + 'name': 'Jane Green', + 'phones': ['512-699-3334', '512-222-5478'] + } + ] + + tbl = Table(json) + print (tbl) + >>> {'id': '5421', 'name': 'Jane Green', 'phones': ['512-699-3334', '512-222-5478']} + + tbl.unpack_list('phones', replace=True) + print (tbl) + >>> {'id': '5421', 'name': 'Jane Green', 'phones_0': '512-699-3334', 'phones_1': '512-222-5478'} # noqa: E501 + + `Args:` + column: str + The column name to unpack + include_original: boolean + Retain original column after unpacking + sample_size: int + Number of rows to sample before determining columns + missing: str + If a value is missing, the value to fill it with + replace: boolean + Return new table or replace existing + max_columns: int + The maximum number of columns to unpack + `Returns:` + None + """ + + # Convert all column values to list to avoid unpack errors + self.table = petl.convert( + self.table, column, lambda v: [v] if not isinstance(v, list) else v + ) + + # Find the max number of values in list for all rows + col_count = 0 + for row in self.cut(column): + if len(row[column]) > col_count: + col_count = len(row[column]) + + # If max columns provided, set max columns + if col_count > 0 and max_columns: + col_count = max_columns + + # Create new column names "COL_01, COL_02" + new_cols = [] + for i in range(col_count): + new_cols.append(column + '_' + str(i)) + + tbl = petl.unpack(self.table, column, new_cols, + include_original=include_original, missing=missing) + + if replace: + self.table = tbl + + else: + return tbl
+ +
[docs] def unpack_nested_columns_as_rows(self, column, key='id', expand_original=False): + """ + Unpack list or dict values from one column into separate rows. + Not recommended for JSON columns (i.e. lists of dicts), but can handle columns + with any mix of types. Makes use of PETL's `melt()` method. + + `Args:` + column: str + The column name to unpack + key: str + The column to use as a key when unpacking. Defaults to `id` + expand_original: boolean or int + If `True`: Add resulting unpacked rows (with all other columns) to original + If `int`: Add to original unless the max added per key is above the given number + If `False` (default): Return unpacked rows (with `key` column only) as standalone + Removes packed list and dict rows from original either way. + `Returns:` + If `expand_original`, original table with packed rows replaced by unpacked rows + Otherwise, standalone table with key column and unpacked values only + """ + + if isinstance(expand_original, int) and expand_original is not True: + lengths = {len(row[column]) for row in self if isinstance(row[column], (dict, list))} + max_len = sorted(lengths, reverse=True)[0] + if max_len > expand_original: + expand_original = False + + if expand_original: + # Include all columns and filter out other non-dict types in table_list + table = self + table_list = table.select_rows(lambda row: isinstance(row[column], list)) + else: + # Otherwise, include only key and column, but keep all non-dict types in table_list + table = self.cut(key, column) + table_list = table.select_rows(lambda row: not isinstance(row[column], dict)) + + # All the columns other than column to ignore while melting + ignore_cols = table.columns + ignore_cols.remove(column) + + # Unpack lists as separate columns + table_list.unpack_list(column, replace=True) + + # Rename the columns to retain only the number + for col in table_list.columns: + if f'{column}_' in col: + table_list.rename_column(col, col.replace(f'{column}_', "")) + + # Filter dicts and unpack as separate columns + table_dict = table.select_rows(lambda row: isinstance(row[column], dict)) + table_dict.unpack_dict(column, prepend=False) + + from parsons.etl.table import Table + + # Use melt to pivot both sets of columns into their own Tables and clean out None values + melted_list = Table(petl.melt(table_list.table, ignore_cols)) + melted_dict = Table(petl.melt(table_dict.table, ignore_cols)) + + melted_list.remove_null_rows('value') + melted_dict.remove_null_rows('value') + + melted_list.rename_column('variable', column) + melted_dict.rename_column('variable', column) + + # Combine the list and dict Tables + melted_list.concat(melted_dict) + + import hashlib + + if expand_original: + # Add unpacked rows to the original table (minus packed rows) + orig = self.select_rows(lambda row: not isinstance(row[column], (dict, list))) + orig.concat(melted_list) + # Add unique id column by hashing all the other fields + if 'uid' not in self.columns: + orig.add_column('uid', lambda row: hashlib.md5( + str.encode( + ''.join([str(x) for x in row]) + ) + ).hexdigest()) + orig.move_column('uid', 0) + + # Rename value column in case this is done again to this Table + orig.rename_column('value', f'{column}_value') + + # Keep column next to column_value + orig.move_column(column, -1) + output = orig + else: + orig = self.remove_column(column) + # Add unique id column by hashing all the other fields + melted_list.add_column('uid', lambda row: hashlib.md5( + str.encode( + ''.join([str(x) for x in row]) + ) + ).hexdigest()) + 
melted_list.move_column('uid', 0) + output = melted_list + + self = orig + return output
+ +
[docs] def long_table(self, key, column, key_rename=None, retain_original=False, + prepend=True, prepend_value=None): + """ + Create a new long parsons table from a column, including the foreign + key. + + .. code-block:: python + + # Begin with nested dicts in a column + json = [{'id': '5421', + 'name': 'Jane Green', + 'emails': [{'home': 'jane@gmail.com'}, + {'work': 'jane@mywork.com'} + ] + } + ] + tbl = Table(json) + print (tbl) + >>> {'id': '5421', 'name': 'Jane Green', 'emails': [{'home': 'jane@gmail.com'}, {'work': 'jane@mywork.com'}]} # noqa: E501 + >>> {'id': '5421', 'name': 'Jane Green', 'emails': [{'home': 'jane@gmail.com'}, {'work': 'jane@mywork.com'}]} # noqa: E501 + + # Create skinny table of just the nested dicts + email_skinny = tbl.long_table(['id'], 'emails') + + print (email_skinny) + >>> {'id': '5421', 'emails_home': 'jane@gmail.com', 'emails_work': None} + >>> {'id': '5421', 'emails_home': None, 'emails_work': 'jane@mywork.com'} + + `Args:` + key: lst + The columns to retain in the long table (e.g. foreign keys) + column: str + The column name to make long + key_rename: dict + The new name for the foreign key to better identify it. For + example, you might want to rename ``id`` to ``person_id``. + Ex. {'KEY_NAME': 'NEW_KEY_NAME'} + retain_original: boolean + Retain the original column from the source table. + prepend: + Prepend the column name of the unpacked values. Useful for + avoiding duplicate column names + prepend_value: + Value to prepend new columns if ``prepend=True``. If None, will + set to column name. + `Returns:` + Parsons Table + The new long table + """ + + if type(key) == str: + key = [key] + + lt = self.cut(*key, column) # Create a table of key and column + lt.unpack_list(column, replace=True) # Unpack the list + lt.table = petl.melt(lt.table, key) # Melt into a long table + lt = lt.cut(*key, 'value') # Get rid of column names created in unpack + lt.rename_column('value', column) # Rename 'value' to old column name + lt.remove_null_rows(column) # Remove null values + + # If a new key name is specified, rename + if key_rename: + for k, v in key_rename.items(): + lt.rename_column(k, v) + + # If there is a nested dict in the column, unpack it + if lt.num_rows > 0 and isinstance(lt.table[column][0], dict): + lt.unpack_dict(column, prepend=prepend, prepend_value=prepend_value) + + if not retain_original: + self.remove_column(column) + + return lt
+ +
+    def cut(self, *columns):
+        """
+        Return a table with a selection of columns.
+
+        `Args:`
+            \*columns: str
+                Columns in the parsons table
+        `Returns:`
+            A new parsons table containing the selected columns
+        """ # noqa: W605
+
+        from parsons.etl.table import Table
+
+        return Table(petl.cut(self.table, *columns))
+ +
+    def select_rows(self, *filters):
+        """
+        Select specific rows from a Parsons table based on the passed
+        filters.
+
+        Example filters:
+
+        .. code-block:: python
+
+            tbl = Table([['foo', 'bar', 'baz'],
+                         ['c', 4, 9.3],
+                         ['a', 2, 88.2],
+                         ['b', 1, 23.3],])
+
+            # You can structure the filter in multiple ways
+
+            # Lambda Function
+            tbl2 = tbl.select_rows(lambda row: row.foo == 'a' and row.baz > 88.1)
+            tbl2
+            >>> {foo: 'a', 'bar': 2, 'baz': 88.2}
+
+            # Expression String
+            tbl3 = tbl.select_rows("{foo} == 'a' and {baz} > 88.1")
+            tbl3
+            >>> {foo: 'a', 'bar': 2, 'baz': 88.2}
+
+        `Args:`
+            \*filters: function or str
+        `Returns:`
+            A new parsons table containing the selected rows
+        """ # noqa: W605
+
+        from parsons.etl.table import Table
+
+        return Table(petl.select(self.table, *filters))
+ +
[docs] def remove_null_rows(self, columns, null_value=None): + """ + Remove rows if the values in a column are ``None``. If multiple columns + are passed as list, it will remove all rows with null values in any + of the passed columns. + + `Args:` + column: str or list + The column or columns to analyze + null_value: int or float or str + The null value + `Returns:` + ``None`` + """ + if isinstance(columns, str): + columns = [columns] + + for col in columns: + self.table = petl.selectisnot(self.table, col, null_value) + + return self
+ + def _prepend_dict(self, dict_obj, prepend): + # Internal method to rename dict keys + + new_dict = {} + + for k, v in dict_obj.items(): + + new_dict[prepend + '_' + k] = v + + return new_dict + +
[docs] def stack(self, *tables, missing=None): + """ + Stack Parsons tables on top of one another. + + Similar to ``table.concat()``, except no attempt is made to align fields from + different tables. + + `Args:` + tables: Parsons Table or list + A single table, or a list of tables + missing: bool + The value to use when padding missing values + `Returns:` + ``None`` + """ + + if type(tables) not in [list, tuple]: + tables = [tables] + petl_tables = [tbl.table for tbl in tables] + + self.table = petl.stack(self.table, *petl_tables, missing=missing)
+ +
[docs] def concat(self, *tables, missing=None): + """ + Concatenates one or more tables onto this one. + + Note that the tables do not need to share exactly the same fields. + Any missing fields will be padded with None, or whatever is provided via the + ``missing`` keyword argument. + + `Args:` + tables: Parsons Table or list + A single table, or a list of tables + missing: bool + The value to use when padding missing values + `Returns:` + ``None`` + """ + + if type(tables) not in [list, tuple]: + tables = [tables] + petl_tables = [tbl.table for tbl in tables] + + self.table = petl.cat(self.table, *petl_tables, missing=missing)
+ +
[docs] def chunk(self, rows): + """ + Divides a Parsons table into smaller tables of a specified row count. If the table + cannot be divided evenly, then the final table will only include the remainder. + + `Args:` + rows: int + The number of rows of each new Parsons table + `Returns:` + List of Parsons tables + """ + + from parsons.etl import Table + return [Table(petl.rowslice(self.table, i, i+rows)) for i in range(0, self.num_rows, rows)]
+ +
[docs] @staticmethod + def get_normalized_column_name(column_name): + """ + Returns a column name with whitespace removed, non-alphanumeric characters removed, and + everything lowercased. + + `Returns:` + str + Normalized column name + """ + + column_name = column_name.lower().strip() + return ''.join(c for c in column_name if c.isalnum())
+ +
[docs] def match_columns(self, desired_columns, fuzzy_match=True, if_extra_columns='remove', + if_missing_columns='add'): + """ + Changes the column names and ordering in this Table to match a list of desired column + names. + + `Args:` + desired_columns: list + Ordered list of desired column names + fuzzy_match: bool + Whether to normalize column names when matching against the desired column names, + removing whitespace and non-alphanumeric characters, and lowercasing everything. + Eg. With this flag set, "FIRST NAME" would match "first_name". + If the Table has two columns that normalize to the same string (eg. "FIRST NAME" + and "first_name"), the latter will be considered an extra column. + if_extra_columns: string + If the Table has columns that don't match any desired columns, either 'remove' + them, 'ignore' them, or 'fail' (raising an error). + if_missing_columns: string + If the Table is missing some of the desired columns, either 'add' them (with a + value of None), 'ignore' them, or 'fail' (raising an error). + + `Returns:` + `Parsons Table` and also updates self + """ + + from parsons.etl import Table # Just trying to avoid recursive imports. + + normalize_fn = Table.get_normalized_column_name if fuzzy_match else (lambda s: s) + + desired_columns_normalized = { + normalize_fn(col): col for col in desired_columns + } + + # Check for extra columns in the Table. + for orig_col in self.columns: + normalized_col = normalize_fn(orig_col) + if normalized_col not in desired_columns_normalized: + if if_extra_columns == 'fail': + raise TypeError(f"Table has extra column {orig_col}") + elif if_extra_columns == 'remove': + self.remove_column(orig_col) + elif if_extra_columns != 'ignore': + raise TypeError(f"Invalid option {if_extra_columns} for " + "argument `if_extra_columns`") + else: + # We matched a desired column. Remove it from our list, so if there is another + # column in our Table that has the same normalized name, we consider it an + # extra column. + desired_columns_normalized.pop(normalized_col) + + # Regenerate the desired columns normalized dict, since we removed any matches + # from it above. + # Note we reverse the desired columns here, to make reordering easier later. + # Cast desired_columns to a list in case someone gave us a dict or other iterable + # that can't be reversed. + desired_columns_normalized = { + normalize_fn(col): col for col in reversed(list(desired_columns)) + } + + tbl_columns_normalized = { + normalize_fn(col): col for col in self.columns + } + + # Check for missing columns + for normalized_col, orig_col in desired_columns_normalized.items(): + if normalized_col not in tbl_columns_normalized: + if if_missing_columns == 'fail': + raise TypeError(f"Table is missing column {orig_col}") + elif if_missing_columns == 'add': + self.add_column(orig_col) + tbl_columns_normalized[normalized_col] = orig_col + elif if_missing_columns != 'ignore': + raise TypeError(f"Invalid option {if_missing_columns} for " + "argument `if_missing_columns`") + + # Change column ordering and names to match the desired columns + for desired_normalized_col, desired_orig_col in desired_columns_normalized.items(): + # Note that we ignore any desired columns still not in the Table, given + # that we already checked what the caller wanted to do above. 
+ if desired_normalized_col in tbl_columns_normalized: + tbl_orig_col = tbl_columns_normalized[desired_normalized_col] + if tbl_orig_col != desired_orig_col: + self.rename_column(tbl_orig_col, desired_orig_col) + self.move_column(desired_orig_col, 0) + + return self
+ +
[docs] def reduce_rows(self, columns, reduce_func, headers, presorted=False, + **kwargs): + """ + Group rows by a column or columns, then reduce the groups to a single row. + + Based on the `rowreduce petl function <https://petl.readthedocs.io/en/stable/transform.html#petl.transform.reductions.rowreduce>`_. + + For example, the output from the query to get a table's definition is + returned as one component per row. The `reduce_rows` method can be used + to reduce all those to a single row containg the entire query. + + .. code-block:: python + + >>> ddl = rs.query(sql_to_get_table_ddl) + >>> ddl.table + + +--------------+--------------+----------------------------------------------------+ + | schemaname | tablename | ddl | + +==============+==============+====================================================+ + | 'db_scratch' | 'state_fips' | '--DROP TABLE db_scratch.state_fips;' | + +--------------+--------------+----------------------------------------------------+ + | 'db_scratch' | 'state_fips' | 'CREATE TABLE IF NOT EXISTS db_scratch.state_fips' | + +--------------+--------------+----------------------------------------------------+ + | 'db_scratch' | 'state_fips' | '(' | + +--------------+--------------+----------------------------------------------------+ + | 'db_scratch' | 'state_fips' | '\\tstate VARCHAR(1024) ENCODE RAW' | + +--------------+--------------+----------------------------------------------------+ + | 'db_scratch' | 'state_fips' | '\\t,stusab VARCHAR(1024) ENCODE RAW' | + +--------------+--------------+----------------------------------------------------+ + + >>> reducer_fn = lambda columns, rows: [ + ... f"{columns[0]}.{columns[1]}", + ... '\\n'.join([row[2] for row in rows])] + >>> ddl.reduce_rows( + ... ['schemaname', 'tablename'], + ... reducer_fn, + ... ['tablename', 'ddl'], + ... presorted=True) + >>> ddl.table + + +-------------------------+-----------------------------------------------------------------------+ + | tablename | ddl | + +=========================+=======================================================================+ + | 'db_scratch.state_fips' | '--DROP TABLE db_scratch.state_fips;\\nCREATE TABLE IF NOT EXISTS | + | | db_scratch.state_fips\\n(\\n\\tstate VARCHAR(1024) ENCODE RAW\\n\\t | + | | ,db_scratch.state_fips\\n(\\n\\tstate VARCHAR(1024) ENCODE RAW | + | | \\n\\t,stusab VARCHAR(1024) ENCODE RAW\\n\\t,state_name | + | | VARCHAR(1024) ENCODE RAW\\n\\t,statens VARCHAR(1024) ENCODE | + | | RAW\\n)\\nDISTSTYLE EVEN\\n;' | + +-------------------------+-----------------------------------------------------------------------+ + + `Args:` + columns: list + The column(s) by which to group the rows. + reduce_func: fun + The function by which to reduce the rows. Should take the 2 + arguments, the columns list and the rows list and return a list. + `reducer(columns: list, rows: list) -> list;` + headers: list + The list of headers for modified table. The length of `headers` + should match the length of the list returned by the reduce + function. + presorted: bool + If false, the row will be sorted. + `Returns:` + `Parsons Table` and also updates self + + """ # noqa: E501,E261 + + self.table = petl.rowreduce( + self.table, + columns, + reduce_func, + header=headers, + presorted=presorted, + **kwargs) + + return self
+ +
+    def sort(self, columns=None, reverse=False):
+        """
+        Sort the rows of a table.
+
+        `Args:`
+            columns: list or str
+                Sort by a single column or a list of columns. If ``None`` then
+                will sort columns from left to right.
+            reverse: boolean
+                Sort rows in reverse order.
+        `Returns:`
+            `Parsons Table` and also updates self
+        """
+
+        self.table = petl.sort(self.table, key=columns, reverse=reverse)
+
+        return self
+ +
[docs] def set_header(self, new_header): + """ + Replace the header row of the table. + + `Args:` + new_header: list + List of new header column names + `Returns:` + `Parsons Table` and also updates self + """ + self.table = petl.setheader(self.table, new_header) + return self
+
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/etl/table.html b/docs/html/_modules/parsons/etl/table.html
new file mode 100644
index 0000000000..46c9512a47
--- /dev/null
+++ b/docs/html/_modules/parsons/etl/table.html
@@ -0,0 +1,394 @@
+ parsons.etl.table — Parsons 0.1 documentation

Source code for parsons.etl.table

+from parsons.etl.etl import ETL
+from parsons.civis.civis import CivisClient
+import pandas as pd
+import petl as etl
+import civis
+import os
+import gzip
+
+
[docs]class Table(ETL): + + def __init__(self, lst): + + if type(lst) == list: + + self.table = etl.fromdicts(lst) + + else: + + self.table = lst + + def __repr__(self): + + return repr(etl.dicts(self.table)) + + def __getitem__(self, item): + + return etl.dicts(self.table)[item] + + #return self._lst[item] + +
[docs] def to_df(self, index=None, exclude=None, columns=None, coerce_float=False): + """Outputs table as a Pandas Dataframe + + `Args:` + + index: str, list + Field of array to use as the index, alternately a specific set of + input labels to use + + exclude: list + Columns or fields to exclude + + columns: list + Column names to use. If the passed data do not have names associated with + them, this argument provides names for the columns. Otherwise this argument + indicates the order of the columns in the result (any names not found in + the data will become all-NA columns) + + `Returns:` + + dataframe + Pandas DataFrame object + """ + + return etl.todataframe(self.table, index=index, exclude=exclude, columns=columns, + coerce_float=coerce_float)
+ +
[docs] def to_csv(self, source=None, encoding=None, errors='strict', write_header=False, **csvargs): + """Outputs table to a CSV. Additional additional key word arguments + are passed to ``csv.writer()``. So, e.g., to override the delimiter + from the default CSV dialect, provide the delimiter keyword argument. + + + .. warning:: + If a file already exists at the given location, it will be + overwritten. If you wish to append to a table, use the + :meth:`to_append_csv` method. + + `Args:` + + source: str or file-like object + The output path for the CSV + + encoding: str + The CSV encoding type for `csv.writer() + <https://docs.python.org/2/library/csv.html#csv.writer/>`_ + + errors: str + Raise an Error if encountered + + write_header: boolean + Include header in output + + `Returns:` + + Parsons Table + See :ref:`parsons-table` for output options. + """ + + etl.tocsv(self.table, source=source, encoding=encoding, errors=errors, write_header=write_header, **csvargs) + + return None
+ +
[docs] def to_append_csv(self, source=None, encoding=None, errors='strict', write_header=False, **csvargs): + """Appends table to a CSV. Additional additional key word arguments + are passed to ``csv.writer()``. So, e.g., to override the delimiter + from the default CSV dialect, provide the delimiter keyword argument. + + `Args:` + + source: str or file-like object + The output path for the CSV + + encoding: str + The CSV encoding type for `csv.writer() + <https://docs.python.org/2/library/csv.html#csv.writer/>`_ + + errors: str + Raise an Error if encountered + + write_header: boolean + Include header in output + + `Returns:` + + Parsons Table + See :ref:`parsons-table` for output options. + """ + + etl.appendcsv(self.table, source=source, encoding=encoding, errors=errors, + write_header=write_header, **csvargs)
+ + def to_petl(self): + + return self.table + +
[docs] def to_civis(self, db, table, api_key=None, max_errors=None, + existing_table_rows='fail', diststyle=None, distkey=None, + sortkey1=None, sortkey2=None, wait=True, **civisargs): + """Write the table to a Civis Redshift cluster. Additional key word + arguments can passed to `civis.io.dataframe_to_civis() <https://civis-python.readthedocs.io/en/v1.9.0/generated/civis.io.dataframe_to_civis.html#civis.io.dataframe_to_civis>`_ + + db: str or int + The Civis Database. Can be database name or ID + + table: str + The schema and table you want to upload to. E.g., + 'scratch.table'. Schemas or tablenames with periods + must be double quoted, e.g. 'scratch."my.table"'. + + api_key: str + Your Civis API key. If not given, the CIVIS_API_KEY + environment variable will be used. + + max_errors: int + The maximum number of rows with errors to remove from + the import before failing. + + existing_table_rows: str + The behaviour if a table with the requested name already + exists. One of `'fail'`, `'truncate'`, `'append'` or `'drop'`. + Defaults to `'fail'`. + + diststyle: str + The distribution style for the table. One of `'even'`, `'all'` or + `'key'`. + + distkey: str + The column to use as the distkey for the table. + + sortkey1: str + The column to use as the sortkey for the table. + + sortkey2: str + The second column in a compound sortkey for the table. + + wait: boolean + Wait for write job to complete before exiting method. + + """ + + civis = CivisClient(db=db, api_key=api_key) + return civis.table_import(self, table, max_errors=None, + existing_table_rows='fail', diststyle=None, distkey=None, + sortkey1=None, sortkey2=None, wait=True, **civisargs)
+ + @classmethod + def from_csv(cls, csv_obj): + + return cls(etl.fromcsv(etl.io.sources.MemorySource(csv_obj.getvalue()))) + + @classmethod + def from_columns(cls, cols, header=None, missing=None): + + return cls(etl.wrap(cols))
+ + + + + +
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/etl/tofrom.html b/docs/html/_modules/parsons/etl/tofrom.html
new file mode 100644
index 0000000000..2eb8d11c16
--- /dev/null
+++ b/docs/html/_modules/parsons/etl/tofrom.html
@@ -0,0 +1,957 @@
+ parsons.etl.tofrom — Parsons 0.5 documentation

Source code for parsons.etl.tofrom

+import petl
+import json
+import io
+import gzip
+from parsons.utilities import files, zip_archive
+
+
+
[docs]class ToFrom(object): + +
[docs] def to_dataframe(self, index=None, exclude=None, columns=None, + coerce_float=False): + """ + Outputs table as a Pandas Dataframe + + `Args:` + index: str, list + Field of array to use as the index, alternately a specific set + of input labels to use + exclude: list + Columns or fields to exclude + columns: list + Column names to use. If the passed data do not have names + associated with them, this argument provides names for the + columns. Otherwise this argument indicates the order of the + columns in the result (any names not found in the data will + become all-NA columns) + `Returns:` + dataframe + Pandas DataFrame object + """ + + return petl.todataframe(self.table, index=index, exclude=exclude, + columns=columns, coerce_float=coerce_float)
+ +
[docs] def to_html(self, local_path=None, encoding=None, errors='strict', + index_header=False, caption=None, tr_style=None, + td_styles=None, truncate=None): + """ + Outputs table to html. + + .. warning:: + If a file already exists at the given location, it will be + overwritten. + + `Args:` + local_path: str + The path to write the html locally. If not specified, a temporary file will be + created and returned, and that file will be removed automatically when the script + is done running. + encoding: str + The encoding type for `csv.writer() + <https://docs.python.org/2/library/csv.html#csv.writer/>`_ + errors: str + Raise an Error if encountered + index_header: boolean + Prepend index to column names; Defaults to False. + caption: str + A caption to include with the html table. + tr_style: str or callable + Style to be applied to the table row. + td_styles: str, dict or callable + Styles to be applied to the table cells. + truncate: int + Length of cell data. + `Returns:` + str + The path of the new file + """ + + if not local_path: + local_path = files.create_temp_file(suffix=".html") + + petl.tohtml(self.table, + source=local_path, + encoding=encoding, + errors=errors, + caption=caption, + index_header=index_header, + tr_style=tr_style, + td_styles=td_styles, + truncate=truncate) + + return local_path
+ +
[docs] def to_csv(self, local_path=None, temp_file_compression=None, encoding=None, errors='strict', + write_header=True, csv_name=None, **csvargs): + """ + Outputs table to a CSV. Additional key word arguments are passed to ``csv.writer()``. So, + e.g., to override the delimiter from the default CSV dialect, provide the delimiter + keyword argument. + + .. warning:: + If a file already exists at the given location, it will be + overwritten. + + `Args:` + local_path: str + The path to write the csv locally. If it ends in ".gz" or ".zip", the file will be + compressed. If not specified, a temporary file will be created and returned, + and that file will be removed automatically when the script is done running. + temp_file_compression: str + If a temp file is requested (ie. no ``local_path`` is specified), the compression + type for that file. Currently "None", "gzip" or "zip" are supported. + If a ``local_path`` is specified, this argument is ignored. + encoding: str + The CSV encoding type for `csv.writer() + <https://docs.python.org/2/library/csv.html#csv.writer/>`_ + errors: str + Raise an Error if encountered + write_header: boolean + Include header in output + csv_name: str + If ``zip`` compression (either specified or inferred), the name of csv file + within the archive. + \**csvargs: kwargs + ``csv_writer`` optional arguments + + `Returns:` + str + The path of the new file + """ # noqa: W605 + + # If a zip archive. + if files.zip_check(local_path, temp_file_compression): + return self.to_zip_csv(archive_path=local_path, + encoding=encoding, + errors=errors, + write_header=write_header, + csv_name=csv_name, + **csvargs) + + if not local_path: + suffix = '.csv' + files.suffix_for_compression_type(temp_file_compression) + local_path = files.create_temp_file(suffix=suffix) + + # Create normal csv/.gzip + petl.tocsv(self.table, + source=local_path, + encoding=encoding, + errors=errors, + write_header=write_header, + **csvargs) + + return local_path
+ +
[docs] def append_csv(self, local_path, encoding=None, errors='strict', **csvargs): + """ + Appends table to an existing CSV. + + Additional additional key word arguments + are passed to ``csv.writer()``. So, e.g., to override the delimiter + from the default CSV dialect, provide the delimiter keyword argument. + + `Args:` + local_path: str + The local path of an existing CSV file. If it ends in ".gz", the file will + be compressed. + encoding: str + The CSV encoding type for `csv.writer() + <https://docs.python.org/2/library/csv.html#csv.writer/>`_ + errors: str + Raise an Error if encountered + \**csvargs: kwargs + ``csv_writer`` optional arguments + + `Returns:` + str + The path of the file + """ # noqa: W605 + + petl.appendcsv(self.table, + source=local_path, + encoding=encoding, + errors=errors, + **csvargs) + return local_path
+ +
[docs] def to_zip_csv(self, archive_path=None, csv_name=None, encoding=None, + errors='strict', write_header=True, if_exists='replace', **csvargs): + """ + Outputs table to a CSV in a zip archive. Additional key word arguments are passed to + ``csv.writer()``. So, e.g., to override the delimiter from the default CSV dialect, + provide the delimiter keyword argument. Use thismethod if you would like to write + multiple csv files to the same archive. + + .. warning:: + If a file already exists in the archive, it will be overwritten. + + `Args:` + archive_path: str + The path to zip achive. If not specified, a temporary file will be created and + returned, and that file will be removed automatically when the script is done + running. + csv_name: str + The name of the csv file to be stored in the archive. If ``None`` will use + the archive name. + encoding: str + The CSV encoding type for `csv.writer() + <https://docs.python.org/2/library/csv.html#csv.writer/>`_ + errors: str + Raise an Error if encountered + write_header: boolean + Include header in output + if_exists: str + If archive already exists, one of 'replace' or 'append' + \**csvargs: kwargs + ``csv_writer`` optional arguments + + `Returns:` + str + The path of the archive + """ # noqa: W605 + + if not archive_path: + archive_path = files.create_temp_file(suffix='.zip') + + cf = self.to_csv(encoding=encoding, errors=errors, write_header=write_header, **csvargs) + + if not csv_name: + csv_name = files.extract_file_name(archive_path, include_suffix=False) + '.csv' + + return zip_archive.create_archive(archive_path, cf, file_name=csv_name, + if_exists=if_exists)
+ +
[docs] def to_json(self, local_path=None, temp_file_compression=None, line_delimited=False): + """ + Outputs table to a JSON file + + .. warning:: + If a file already exists at the given location, it will be + overwritten. + + `Args:` + local_path: str + The path to write the JSON locally. If it ends in ".gz", it will be + compressed first. If not specified, a temporary file will be created and returned, + and that file will be removed automatically when the script is done running. + temp_file_compression: str + If a temp file is requested (ie. no ``local_path`` is specified), the compression + type for that file. Currently "None" and "gzip" are supported. + If a ``local_path`` is specified, this argument is ignored. + line_delimited: bool + Whether the file will be line-delimited JSON (with a row on each line), or a proper + JSON file. + + `Returns:` + str + The path of the new file + """ + + if not local_path: + suffix = '.json' + files.suffix_for_compression_type(temp_file_compression) + local_path = files.create_temp_file(suffix=suffix) + + # Note we don't use the much simpler petl.tojson(), since that method reads the whole + # table into memory before writing to file. + + if files.is_gzip_path(local_path): + open_fn = gzip.open + mode = 'w+t' + else: + open_fn = open + mode = 'w' + + with open_fn(local_path, mode) as file: + if not line_delimited: + file.write('[') + + i = 0 + for row in self: + if i: + if not line_delimited: + file.write(',') + file.write('\n') + i += 1 + json.dump(row, file) + + if not line_delimited: + file.write(']') + + return local_path
+ +
[docs] def to_dicts(self): + """ + Output table as a list of dicts. + + `Returns:` + list + """ + + return list(petl.dicts(self.table))
+ +
[docs] def to_sftp_csv(self, remote_path, host, username, password, port=22, encoding=None, + compression=None, errors='strict', write_header=True, + rsa_private_key_file=None, **csvargs): + """ + Writes the table to a CSV file on a remote SFTP server + + `Args:` + remote_path: str + The remote path of the file. If it ends in '.gz', the file will be compressed. + host: str + The remote host + username: str + The username to access the SFTP server + password: str + The password to access the SFTP server + port: int + The port number of the SFTP server + encoding: str + The CSV encoding type for `csv.writer() + <https://docs.python.org/3/library/csv.html#csv.writer>`_ + compression: str + If set, overrides the compression type inferred from the remote path + errors: str + How encoding errors are handled (e.g. ``'strict'``, ``'ignore'``) + write_header: boolean + Include header in output + rsa_private_key_file: str + Absolute path to a private RSA key used + to authenticate the sftp connection + \**csvargs: kwargs + ``csv_writer`` optional arguments + """ # noqa: W605 + + from parsons.sftp import SFTP + + sftp = SFTP(host, username, password, port, rsa_private_key_file) + + compression = compression or files.compression_type_for_path(remote_path) + + local_path = self.to_csv( + temp_file_compression=compression, encoding=encoding, errors=errors, + write_header=write_header, **csvargs) + sftp.put_file(local_path, remote_path)
+ +
[docs] def to_s3_csv(self, bucket, key, aws_access_key_id=None, + aws_secret_access_key=None, compression=None, encoding=None, + errors='strict', write_header=True, acl='bucket-owner-full-control', + public_url=False, public_url_expires=3600, **csvargs): + """ + Writes the table to an s3 object as a CSV + + `Args:` + bucket: str + The s3 bucket to upload to + key: str + The s3 key to name the file. If it ends in '.gz' or '.zip', the file will be + compressed. + aws_access_key_id: str + Required if not included as environmental variable + aws_secret_access_key: str + Required if not included as environmental variable + compression: str + The compression type for the s3 object. Currently "None", "zip" and "gzip" are + supported. If specified, will override the key suffix. + encoding: str + The CSV encoding type for `csv.writer() + <https://docs.python.org/3/library/csv.html#csv.writer>`_ + errors: str + How encoding errors are handled (e.g. ``'strict'``, ``'ignore'``) + write_header: boolean + Include header in output + public_url: boolean + Create a public link to the file + public_url_expires: int + The time, in seconds, until the url expires if ``public_url`` set to ``True``. + Defaults to 3600. + acl: str + The S3 permissions on the file + \**csvargs: kwargs + ``csv_writer`` optional arguments + `Returns:` + str or None + The public url if ``public_url`` is set; otherwise ``None``. + """ # noqa: W605 + + compression = compression or files.compression_type_for_path(key) + + csv_name = files.extract_file_name(key, include_suffix=False) + '.csv' + + # Save the CSV as a temp file + local_path = self.to_csv(temp_file_compression=compression, + encoding=encoding, + errors=errors, + write_header=write_header, + csv_name=csv_name, + **csvargs) + + # Put the file on S3 + from parsons.aws import S3 + self.s3 = S3(aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) + self.s3.put_file(bucket, key, local_path, acl=acl) + + if public_url: + return self.s3.get_url(bucket, key, expires_in=public_url_expires) + else: + return None
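A hedged usage sketch (the bucket and key are hypothetical; credentials may instead come from the AWS env vars mentioned above):

    from parsons import Table

    tbl = Table([{'id': 1}])
    url = tbl.to_s3_csv('my-bucket', 'exports/people.csv.gz',
                        public_url=True, public_url_expires=600)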
+ +
[docs] def to_redshift(self, table_name, username=None, password=None, host=None, + db=None, port=None, **copy_args): + """ + Write a table to a Redshift database. Note, this requires you to pass + AWS S3 credentials or store them as environmental variables. + + Args: + table_name: str + The schema and table name (``my_schema.my_table``) to write the data to. + username: str + Required if env variable ``REDSHIFT_USERNAME`` not populated + password: str + Required if env variable ``REDSHIFT_PASSWORD`` not populated + host: str + Required if env variable ``REDSHIFT_HOST`` not populated + db: str + Required if env variable ``REDSHIFT_DB`` not populated + port: int + Required if env variable ``REDSHIFT_PORT`` not populated. Port 5439 is typical. + \**copy_args: kwargs + See :func:`~parsons.databases.Redshift.copy` for options. + + Returns: + ``None`` + """ # noqa: W605 + + from parsons.databases.redshift import Redshift + rs = Redshift(username=username, password=password, host=host, db=db, port=port) + rs.copy(self, table_name, **copy_args)
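For instance (the table name is hypothetical; assumes the ``REDSHIFT_*`` env vars plus S3 credentials for the underlying COPY are set, and that ``if_exists`` is one of ``Redshift.copy``'s options):

    from parsons import Table

    tbl = Table([{'id': 1}])
    tbl.to_redshift('my_schema.my_table', if_exists='append')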
+ +
[docs] def to_postgres(self, table_name, username=None, password=None, host=None, + db=None, port=None, **copy_args): + """ + Write a table to a Postgres database. + + Args: + table_name: str + The schema and table name (``my_schema.my_table``) to write the data to. + username: str + Required if env variable ``PGUSER`` not populated + password: str + Required if env variable ``PGPASSWORD`` not populated + host: str + Required if env variable ``PGHOST`` not populated + db: str + Required if env variable ``PGDATABASE`` not populated + port: int + Required if env variable ``PGPORT`` not populated. + \**copy_args: kwargs + See :func:`~parsons.databases.Postgres.copy` for options. + + Returns: + ``None`` + """ # noqa: W605 + + from parsons.databases.postgres import Postgres + pg = Postgres(username=username, password=password, host=host, db=db, port=port) + pg.copy(self, table_name, **copy_args)
+ + def to_petl(self): + """Return the underlying petl table object.""" + return self.table + +
[docs] def to_civis(self, table, api_key=None, db=None, max_errors=None, + existing_table_rows='fail', diststyle=None, distkey=None, + sortkey1=None, sortkey2=None, wait=True, **civisargs): + """ + Write the table to a Civis Redshift cluster. Additional keyword + arguments can be passed to `civis.io.dataframe_to_civis() + <https://civis-python.readthedocs.io/en/v1.9.0/generated/civis.io.dataframe_to_civis.html#civis.io.dataframe_to_civis>`_ # noqa: E501 + + `Args:` + table: str + The schema and table you want to upload to. E.g., + 'scratch.table'. Schemas or tablenames with periods must be + double quoted, e.g. 'scratch."my.table"'. + api_key: str + Your Civis API key. If not given, the CIVIS_API_KEY environment + variable will be used. + db: str or int + The Civis Database. Can be database name or ID + max_errors: int + The maximum number of rows with errors to remove from + the import before failing. + diststyle: str + The distribution style for the table. One of `'even'`, `'all'` + or `'key'`. + existing_table_rows: str + The behaviour if a table with the requested name already + exists. One of `'fail'`, `'truncate'`, `'append'` or `'drop'`. + Defaults to `'fail'`. + distkey: str + The column to use as the distkey for the table. + sortkey1: str + The column to use as the sortkey for the table. + sortkey2: str + The second column in a compound sortkey for the table. + wait: boolean + Wait for write job to complete before exiting method. + """ + + from parsons.civis.civisclient import CivisClient + civis = CivisClient(db=db, api_key=api_key) + # Pass the caller's arguments through rather than the parameter defaults + return civis.table_import( + self, table, max_errors=max_errors, + existing_table_rows=existing_table_rows, diststyle=diststyle, + distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, + wait=wait, **civisargs)
+ +
[docs] @classmethod + def from_csv(cls, local_path, **csvargs): + """ + Create a ``parsons table`` object from a CSV file + + `Args:` + local_path: obj + A csv formatted local path, url or ftp. If this is a + file path that ends in ".gz", the file will be decompressed first. + \**csvargs: kwargs + ``csv_reader`` optional arguments + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ # noqa: W605 + + remote_prefixes = ["http://", "https://", "ftp://", "s3://"] + if any(map(local_path.startswith, remote_prefixes)): + is_remote_file = True + else: + is_remote_file = False + + if not is_remote_file and not files.has_data(local_path): + raise ValueError('CSV file is empty') + + return cls(petl.fromcsv(local_path, **csvargs))
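For example (paths are hypothetical):

    from parsons import Table

    local_tbl = Table.from_csv('people.csv')  # also accepts ".gz" paths
    remote_tbl = Table.from_csv('https://example.com/people.csv',
                                delimiter='|')  # csvargs pass through to the reader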
+ +
[docs] @classmethod + def from_csv_string(cls, str, **csvargs): + """ + Create a ``parsons table`` object from a string representing a CSV. + + `Args:` + str: str + The string object to convert to a table + **csvargs: kwargs + ``csv_reader`` optional arguments + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + bytesio = io.BytesIO(str.encode('utf-8')) + memory_source = petl.io.sources.MemorySource(bytesio.read()) + return cls(petl.fromcsv(memory_source, **csvargs))
+ +
[docs] @classmethod + def from_columns(cls, cols, header=None): + """ + Create a ``parsons table`` from a list of lists organized as columns + + `Args:` + cols: list + A list of lists organized as columns + header: list + List of column names. If not specified, will use dummy column names + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + return cls(petl.fromcolumns(cols, header=header))
+ +
[docs] @classmethod + def from_json(cls, local_path, header=None, line_delimited=False): + """ + Create a ``parsons table`` from a JSON file + + `Args:` + local_path: str + A JSON formatted local path, url or ftp. If this is a + file path that ends in ".gz", the file will be decompressed first. + header: list + List of columns to use for the destination table. If omitted, columns will + be inferred from the initial data in the file. + line_delimited: bool + Whether the file is line-delimited JSON (with a row on each line), or a proper + JSON file. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + if line_delimited: + if files.is_gzip_path(local_path): + open_fn = gzip.open + else: + open_fn = open + + with open_fn(local_path, 'r') as file: + rows = [json.loads(line) for line in file] + return cls(rows) + + else: + return cls(petl.fromjson(local_path, header=header))
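For example (paths are hypothetical):

    from parsons import Table

    tbl = Table.from_json('rows.json')  # a regular JSON array of objects
    ld_tbl = Table.from_json('rows.json.gz', line_delimited=True)  # one object per line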
+ +
[docs] @classmethod + def from_redshift(cls, sql, username=None, password=None, host=None, + db=None, port=None): + """ + Create a ``parsons table`` from a Redshift query. + + To pull an entire Redshift table, use a query like ``SELECT * FROM tablename``. + + `Args:` + sql: str + A valid SQL statement + username: str + Required if env variable ``REDSHIFT_USERNAME`` not populated + password: str + Required if env variable ``REDSHIFT_PASSWORD`` not populated + host: str + Required if env variable ``REDSHIFT_HOST`` not populated + db: str + Required if env variable ``REDSHIFT_DB`` not populated + port: int + Required if env variable ``REDSHIFT_PORT`` not populated. Port 5439 is typical. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + from parsons.databases.redshift import Redshift + rs = Redshift(username=username, password=password, host=host, db=db, port=port) + return rs.query(sql)
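For example (the query is illustrative; assumes the ``REDSHIFT_*`` env vars are set):

    from parsons import Table

    tbl = Table.from_redshift('SELECT * FROM my_schema.my_table LIMIT 100')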
+ +
[docs] @classmethod + def from_postgres(cls, sql, username=None, password=None, host=None, db=None, port=None): + """ + Create a ``parsons table`` from a Postgres query. + + Args: + sql: str + A valid SQL statement + username: str + Required if env variable ``PGUSER`` not populated + password: str + Required if env variable ``PGPASSWORD`` not populated + host: str + Required if env variable ``PGHOST`` not populated + db: str + Required if env variable ``PGDATABASE`` not populated + port: int + Required if env variable ``PGPORT`` not populated. + Returns: + Parsons Table + See :ref:`parsons-table` for output options. + """ + + from parsons.databases.postgres import Postgres + pg = Postgres(username=username, password=password, host=host, db=db, port=port) + return pg.query(sql)
+ +
[docs] @classmethod + def from_s3_csv(cls, bucket, key, from_manifest=False, aws_access_key_id=None, + aws_secret_access_key=None, **csvargs): + """ + Create a ``parsons table`` from a key in an S3 bucket. + + `Args:` + bucket: str + The S3 bucket. + key: str + The S3 key + from_manifest: bool + If True, treats `key` as a manifest file and loads all urls into a `parsons.Table`. + Defaults to False. + aws_access_key_id: str + Required if not included as environmental variable. + aws_secret_access_key: str + Required if not included as environmental variable. + \**csvargs: kwargs + ``csv_reader`` optional arguments + `Returns:` + `parsons.Table` object + """ # noqa: W605 + + from parsons.aws import S3 + s3 = S3(aws_access_key_id, aws_secret_access_key) + + if from_manifest: + with open(s3.get_file(bucket, key)) as fd: + manifest = json.load(fd) + + s3_keys = [x["url"] for x in manifest["entries"]] + + else: + s3_keys = [f"s3://{bucket}/{key}"] + + tbls = [] + for key in s3_keys: + # TODO handle urls that end with '/', i.e. urls that point to "folders" + _, _, bucket_, key_ = key.split("/", 3) + file_ = s3.get_file(bucket_, key_) + if files.compression_type_for_path(key_) == 'zip': + file_ = zip_archive.unzip_archive(file_) + + tbls.append(petl.fromcsv(file_, **csvargs)) + + return cls(petl.cat(*tbls))
+ +
[docs] @classmethod + def from_dataframe(cls, dataframe, include_index=False): + """ + Create a ``parsons table`` from a Pandas dataframe. + + `Args:` + dataframe: dataframe + A valid Pandas dataframe object + include_index: boolean + Include index column + """ + + return cls(petl.fromdataframe(dataframe, include_index=include_index))
diff --git a/docs/html/_modules/parsons/facebook_ads/facebook_ads.html b/docs/html/_modules/parsons/facebook_ads/facebook_ads.html
new file mode 100644
index 0000000000..5d04cc30eb
--- /dev/null
+++ b/docs/html/_modules/parsons/facebook_ads/facebook_ads.html
@@ -0,0 +1,623 @@
+ parsons.facebook_ads.facebook_ads — Parsons 0.5 documentation

Source code for parsons.facebook_ads.facebook_ads

+import os
+import collections
+import copy
+import logging
+from joblib import Parallel, delayed
+from facebook_business.api import FacebookAdsApi
+from facebook_business.adobjects.adaccount import AdAccount
+from facebook_business.adobjects.customaudience import CustomAudience
+from parsons.etl.table import Table
+
+logger = logging.getLogger(__name__)
+
+FBKeySchema = CustomAudience.Schema.MultiKeySchema
+
+# Max number of custom audience users we're allowed to send in one API call
+MAX_FB_AUDIENCE_API_USERS = 10000
+
+
+
[docs]class FacebookAds(object): + """ + Instantiate the FacebookAds class + + `Args:` + app_id: str + A Facebook app ID. Required if env var FB_APP_ID is not populated. + app_secret: str + A Facebook app secret. Required if env var FB_APP_SECRET is not populated. + access_token: str + A Facebook access token. Required if env var FB_ACCESS_TOKEN is not populated. + ad_account_id: str + A Facebook ad account ID. Required if env var FB_AD_ACCOUNT_ID is not populated. + """ + + # The data columns that are valid for creating a custom audience. + # Feel free to add more variants to capture common column names, as long as they're fairly + # unambiguous. + # IMPORTANT - Keep these maps in sync with the comments in the ``add_users_to_custom_audience`` + # method! + # TODO add support for parsing full names from one column + KeyMatchMap = { + FBKeySchema.email: ['email', 'email address', 'voterbase_email'], + FBKeySchema.fn: ['fn', 'first', 'first name', 'vb_tsmart_first_name'], + FBKeySchema.ln: ['ln', 'last', 'last name', 'vb_tsmart_last_name'], + FBKeySchema.phone: [ + 'phone', + 'phone number', + 'cell', + 'landline', + 'vb_voterbase_phone', + 'vb_voterbase_phone_wireless' + ], + FBKeySchema.ct: ['ct', 'city', 'vb_vf_reg_city', 'vb_tsmart_city'], + FBKeySchema.st: [ + 'st', + 'state', + 'state code', + 'vb_vf_source_state', + 'vb_tsmart_state', + 'vb_vf_reg_state', + 'vb_vf_reg_cass_state' + ], + FBKeySchema.zip: ['zip', 'zip code', 'vb_vf_reg_zip', 'vb_tsmart_zip'], + FBKeySchema.country: ['country', 'country code'], + # Yes, it's not kosher to confuse gender and sex. However, gender is all that FB + # supports in their audience targeting. + FBKeySchema.gen: ['gen', 'gender', 'sex', 'vb_voterbase_gender'], + FBKeySchema.doby: ['doby', 'dob year', 'birth year'], + FBKeySchema.dobm: ['dobm', 'dob month', 'birth month'], + FBKeySchema.dobd: ['dobd', 'dob day', 'birth day'], + } + + PreprocessKeyMatchMap = { + # Data in this column will be parsed into the FBKeySchema.dobX keys. + "DOB YYYYMMDD": ['dob', 'vb_voterbase_dob', 'vb_tsmart_dob'] + } + + def __init__(self, app_id=None, app_secret=None, access_token=None, ad_account_id=None): + + try: + self.app_id = app_id or os.environ['FB_APP_ID'] + self.app_secret = app_secret or os.environ['FB_APP_SECRET'] + self.access_token = access_token or os.environ['FB_ACCESS_TOKEN'] + self.ad_account_id = ad_account_id or os.environ['FB_AD_ACCOUNT_ID'] + except KeyError as error: + logger.error("FB Marketing API credentials missing. Must be specified as env vars " + "or kwargs") + raise error + + FacebookAdsApi.init(self.app_id, self.app_secret, self.access_token) + self.ad_account = AdAccount("act_%s" % self.ad_account_id) + + @staticmethod + def _get_match_key_for_column(column): + # Finds a FB match key for a given table column. 
+ + normalized_col = Table.get_normalized_column_name(column) + + for k, match_list in FacebookAds.KeyMatchMap.items(): + for match in match_list: + if normalized_col == Table.get_normalized_column_name(match): + return k + return None + + @staticmethod + def _get_preprocess_key_for_column(column): + normalized_col = Table.get_normalized_column_name(column) + + for k, match_list in FacebookAds.PreprocessKeyMatchMap.items(): + for match in match_list: + if normalized_col == Table.get_normalized_column_name(match): + return k + return None + + @staticmethod + def _preprocess_dob_column(table, column): + # Parse the DOB column into 3 new columns, and remove the original column + # TODO Throw an error if the values are not 6 characters long? + + table.add_column( + FBKeySchema.doby, + lambda row: row[column][:4] if row[column] else None + ) + table.add_column( + FBKeySchema.dobm, + lambda row: row[column][4:6] if row[column] else None + ) + table.add_column( + FBKeySchema.dobd, + lambda row: row[column][6:8] if row[column] else None + ) + table.remove_column(column) + + @staticmethod + def _preprocess_users_table(table): + # Handle columns that require special parsing + for column in table.columns: + preprocess_key = FacebookAds._get_preprocess_key_for_column(column) + if preprocess_key == "DOB YYYYMMDD": + FacebookAds._preprocess_dob_column(table, column) + return table + + @staticmethod + def _get_first_non_empty_value_from_dict(dict, cols): + for c in cols: + if dict[c]: + return dict[c] + return None + +
[docs] @staticmethod + def get_match_table_for_users_table(users_table): + """ + Prepares an input table for matching into a FB custom audience, by identifying which + columns are supported for matching, renaming those columns to what FB expects, and + cutting away the other columns. + + See ``FacebookAds.create_custom_audience`` for more details. + + `Args:` + users_table: Table + The source table for matching + + `Returns:` + Table + The prepared table + """ + + # Copy the table to avoid messing up the source table + t = copy.deepcopy(users_table) + + FacebookAds._preprocess_users_table(t) + + # Map the FB keys to whatever source columns match. + matched_cols = [] + fb_keys_to_orig_cols = collections.defaultdict(set) + for c in t.columns: + match_key = FacebookAds._get_match_key_for_column(c) + if match_key: + matched_cols.append(c) + fb_keys_to_orig_cols[match_key].add(c) + + # Cut the table to just the columns that we can use for matching in FB + t = t.cut(matched_cols) + + # For each of the FB match keys, create a new column from the source column. + # If there is more than one source col for a given FB match key, we'll pick + # the first non-empty value for each row. + + for fb_key, orig_cols in fb_keys_to_orig_cols.items(): + value_fn = ( + lambda bound_cols: + lambda row: + FacebookAds._get_first_non_empty_value_from_dict(row, bound_cols) + )(orig_cols) + + # A little trickery here to handle the case where one of the "orig_cols" is already + # named like the "fb_key". + t.add_column(fb_key+"_fb_temp_col", value_fn) + t.remove_column(*orig_cols) + t.rename_column(fb_key+"_fb_temp_col", fb_key) + + # Convert None values to empty strings. Otherwise the FB SDK chokes. + petl_table = t.to_petl() + t = Table(petl_table.replaceall(None, '')) + + return t
+ + @staticmethod + def _get_match_schema_and_data(table): + # Grab the raw data as a list of tuples + data_list = [row for row in table.data] + return (table.columns, data_list) + + @staticmethod + def _is_valid_data_source(data_source): + valid_sources = [ + CustomAudience.CustomerFileSource.user_provided_only, + CustomAudience.CustomerFileSource.partner_provided_only, + CustomAudience.CustomerFileSource.both_user_and_partner_provided, + ] + return data_source in valid_sources + +
[docs] def create_custom_audience(self, name, data_source, description=None): + """ + Creates a FB custom audience. + + `Args:` + name: str + The name of the custom audience + data_source: str + One of ``USER_PROVIDED_ONLY``, ``PARTNER_PROVIDED_ONLY``, or + ``BOTH_USER_AND_PARTNER_PROVIDED``. + This tells FB whether the data for a custom audience was provided by actual users, + or acquired via partners. FB requires you to specify. + description: str + Optional. The description of the custom audience + + `Returns:` + ID of the created audience + """ + + if not self._is_valid_data_source(data_source): + raise KeyError("Invalid data_source provided") + + params = { + 'name': name, + 'subtype': 'CUSTOM', + 'description': description, + 'customer_file_source': data_source, + } + + res = self.ad_account.create_custom_audience(params=params) + return res['id']
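A hedged sketch (the audience name and description are made up; assumes the ``FB_*`` env vars described above are set):

    from parsons import FacebookAds

    fb = FacebookAds()
    audience_id = fb.create_custom_audience(
        name='Volunteer prospects',
        data_source='USER_PROVIDED_ONLY',
        description='Uploaded from our contact list')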
+ +
[docs] def delete_custom_audience(self, audience_id): + """ + Deletes a FB custom audience. + + `Args:` + audience_id: str + The ID of the custom audience to delete. + """ + + CustomAudience(audience_id).api_delete()
+ + @staticmethod + def _add_batch_to_custom_audience(app_id, app_secret, access_token, audience_id, schema, + batch, added_so_far, total_rows): + # Since this method runs in parallel, we need to re-initialize the Facebook API each time + # to avoid SSL-related errors. Basically, the FacebookAdsApi python framework isn't + # built to run in parallel. + FacebookAdsApi.init(app_id, app_secret, access_token) + + # Note that the FB SDK handles basic normalization and hashing of the data + CustomAudience(audience_id).add_users(schema, batch, is_raw=True) + logger.info(f"Added {added_so_far+len(batch)}/{total_rows} users to custom audience...") + +
[docs] def add_users_to_custom_audience(self, audience_id, users_table): + """ + Adds user data to a custom audience. + + Each user row in the provided table should have at least one of the supported columns + defined. Otherwise the row will be ignored. Beyond that, the rows may have any other + non-supported columns filled out, and those will all be ignored. + + .. list-table:: + :widths: 20 80 + :header-rows: 1 + + * - Column Type + - Valid Column Names + * - Email Address + - ``email``, ``email address``, ``voterbase_email`` + * - First Name + - ``fn``, ``first``, ``first name``, ``vb_tsmart_first_name`` + * - Last Name + - ``ln``, ``last``, ``last name``, ``vb_tsmart_last_name`` + * - Phone Number + - ``phone``, ``phone number``, ``cell``, ``landline``, ``vb_voterbase_phone``, ``vb_voterbase_phone_wireless`` + * - City + - ``ct``, ``city``, ``vb_vf_reg_city``, ``vb_tsmart_city`` + * - State + - ``st``, ``state``, ``state code``, ``vb_vf_source_state``, ``vb_tsmart_state``, ``vb_vf_reg_state``, ``vb_vf_reg_cass_state`` + * - Zip Code + - ``zip``, ``zip code``, ``vb_vf_reg_zip``, ``vb_tsmart_zip`` + * - Country + - ``country``, ``country code`` + * - Gender + - ``gen``, ``gender``, ``sex``, ``vb_voterbase_gender`` + * - Birth Year + - ``doby``, ``dob year``, ``birth year`` + * - Birth Month + - ``dobm``, ``dob month``, ``birth month`` + * - Birth Day + - ``dobd``, ``dob day``, ``birth day`` + * - Date of Birth + - ``dob``, ``vb_voterbase_dob``, ``vb_tsmart_dob`` (Format: YYYYMMDD) + + The column names will be normalized before comparing to this list - eg. removing + whitespace and punctuation - so you don't need to match exactly. + + If more than one of your columns maps to a single FB key, then for each row we'll use any + non-null value for those columns. + Eg. If you have both ``vb_voterbase_phone`` and ``vb_voterbase_phone_wireless`` (which + both map to the FB "phone" key), then for each person in your table, we'll try to pick one + valid phone number. + + For details of the expected data formats for each column type, see + `Facebook Audience API <https://developers.facebook.com/docs/marketing-api/audiences-api>`_, + under "Hashing and Normalization for Multi-Key". + + Note that you shouldn't have to do normalization on your data, as long as it's + reasonably close to what FB expects. Eg. It will convert "Male" to "m", and " JoSH" + to "josh". + + FB will attempt to match the data to users in their system. You won't be able to find out + which users were matched. But if you provide enough data, FB will tell you roughly how many + of them were matched. (You can find the custom audience in your business account at + https://business.facebook.com). + + Note that because FB's matching is so opaque, it will hide lots of data issues. Eg. if you + use "United States" instead of "US" for the "country" field, the API will appear to accept + it, when in reality it is probably ignoring that field. So read the docs if you're worried. + + `Args:` + audience_id: str + The ID of the custom audience to add users to. + users_table: obj + Parsons table + + """ # noqa: E501,E261 + + logger.info(f"Adding custom audience users from provided table with " + f"{users_table.num_rows} rows") + + match_table = FacebookAds.get_match_table_for_users_table(users_table) + if not match_table.columns: + raise KeyError("No valid columns found for audience matching. 
" + "See FacebookAds.KeyMatchMap for supported columns") + + num_rows = match_table.num_rows + logger.info(f"Found {num_rows} rows with valid FB matching keys") + logger.info(f"Using FB matching keys: {match_table.columns}") + + (schema, data) = FacebookAds._get_match_schema_and_data(match_table) + + # Use the FB API to add users, respecting the limit per API call. + # Process and upload batches in parallel, to improve performance. + + batch_size = MAX_FB_AUDIENCE_API_USERS + + parallel_jobs = ( + delayed(FacebookAds._add_batch_to_custom_audience)( + self.app_id, self.app_secret, self.access_token, audience_id, schema, + data[i:i+batch_size], i, num_rows + ) + for i in range(0, len(data), batch_size) + ) + + n_jobs = os.environ.get('PARSONS_NUM_PARALLEL_JOBS', 4) + Parallel(n_jobs=n_jobs)(parallel_jobs)
diff --git a/docs/html/_modules/parsons/google_civic/google_civic.html b/docs/html/_modules/parsons/google_civic/google_civic.html
new file mode 100644
index 0000000000..6fc2acf317
--- /dev/null
+++ b/docs/html/_modules/parsons/google_civic/google_civic.html
@@ -0,0 +1,308 @@
+ parsons.google_civic.google_civic — Parsons 0.1 documentation

Source code for parsons.google_civic.google_civic

+
+import os
+import requests
+from parsons.etl.table import Table
+
+"""
+.. warning::
+    **The Google Civic Connector is an experimental feature.**
+"""
+
+
+
[docs]class GoogleCivic(object): + + def __init__(self, api_key=None, + uri='https://www.googleapis.com/civicinfo/v2/'): + """Returns the Google Civic class + + Can also be initialized by setting the GOOGLE_CIVIC_API_KEY env variable. + + `Args:` + api_key : str + A valid Google api key. + uri: str + Base uri to make api calls. + `Returns:` + object + A GoogleCivic from which to call functions + + """ + + if api_key is None: + + try: + self.api_key = os.environ['GOOGLE_CIVIC_API_KEY'] + except KeyError: + raise KeyError('No Google Civic API key found. Store as ' + 'GOOGLE_CIVIC_API_KEY environment variable ' + 'or pass as an argument') + + else: + self.api_key = api_key + + self.uri = uri + + def request(self, url, args=None): + + if not args: + args = {} + + args['key'] = self.api_key + + r = requests.get(url, params=args) + + return r.json() + 
[docs] def elections(self): + """Returns a collection of information about elections and voter information. + + `Returns:` + Table + Table of information about elections + + """ + + url = self.uri + 'elections' + + return Table((self.request(url))['elections'])
+ +
[docs] def voter_info(self, election_id, address): + """ + Looks up information relevant to a voter based on the voter's + registered address. The returned information may include: + + - Polling places (including early polling sites) for a given + residential street address + - Contest and candidate information + - Election official information + + `Args:` + election_id : int + The relevant election_id; retrieve valid IDs with the elections + method. + address: str + An address to lookup. String is forgiving as Google will parse + it. + `Returns:` + Table + Table containing polling locations, candidates, election info + + """ + + url = self.uri + 'voterinfo' + + args = {'address': address, 'electionId': election_id} + + return Table(self.request(url, args=args))
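A short sketch of both methods (the election_id and address are illustrative):

    from parsons import GoogleCivic

    gc = GoogleCivic()  # or GoogleCivic(api_key='...'); otherwise reads GOOGLE_CIVIC_API_KEY
    elections = gc.elections()
    polling = gc.voter_info(2000, '340 Main St, Venice, CA 90291')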
diff --git a/docs/html/_modules/parsons/logging/plogger.html b/docs/html/_modules/parsons/logging/plogger.html
new file mode 100644
index 0000000000..421a202d4b
--- /dev/null
+++ b/docs/html/_modules/parsons/logging/plogger.html
@@ -0,0 +1,406 @@
+ parsons.logging.plogger — Parsons 0.1 documentation

Source code for parsons.logging.plogger

+import logging
+import sys
+from slacker_log_handler import SlackerLogHandler
+
+LOG_LEVELS = {
+    'CRITICAL': logging.CRITICAL,
+    'ERROR': logging.ERROR,
+    'WARNING': logging.WARNING,
+    'INFO': logging.INFO,
+    'DEBUG': logging.DEBUG,
+    'NOTSET': logging.NOTSET
+}
+
+
+
[docs]class Logger(object): + + def __init__(self, name, handlers=None, level='INFO'): + """Return the Logger class. + + `Args:` + name : str + A name for the logger. + handlers: list + Optional; A list of handlers to set up the logger with. If + ``None`` default handlers will be added. + level: str + Optional; One of ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR``, + or ``CRITICAL``. Defaults to ``INFO``. + `Returns:` + Logger object + """ + self.logger = logging.getLogger(name) + + self.name = self.logger.name + + if handlers: + self.add_handlers(handlers) + else: + self.add_default_handlers() + self.handlers = self.logger.handlers + + if level in LOG_LEVELS.keys(): + self.set_level(level) + elif level is True: + self.set_level('DEBUG') + else: + self.set_level('NOTSET') + self.level = self.logger.level + + def add_handlers(self, handlers): + for handler in handlers: + self.logger.addHandler(handler) + +
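A minimal sketch of the default setup (the logger name is arbitrary):

    from parsons.logging.plogger import Logger

    log = Logger('my_script')  # default handlers: stdout plus my_script.log
    log.info('Sync started')
    log.error('Something went wrong')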
[docs] def add_handler(self, handler, level='INFO', sformat=None, formatter=None, + **kwargs): + """Add a handler to the logger. + + `Args:` + handler: Handler + A Handler object to attach to the logger. + level: str + Optional; One of ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR``, + or ``CRITICAL``. Defaults to ``INFO``. + sformat: str + Optional; A string format for that can be passed into + ``logging.Formatter``. + formatter: Formatter + Optional; A Formatter object for formatting log events. + """ + hdlr = handler + if formatter: + hdlr.setFormatter(formatter) + elif sformat: + hdlr.setFormatter(logging.Formatter(sformat)) + hdlr.setLevel(LOG_LEVELS[level]) + self.logger.addHandler(hdlr) + self.handlers = self.logger.handlers
+ +
[docs] def add_stream_handler(self, stream, **kwargs): + """Add a stream handler to the logger. + + `Args:` + stream: Stream + A Stream object to attach to the logger. + """ + stream_handler = logging.StreamHandler(stream) + if 'level' not in kwargs: + kwargs['level'] = 'INFO' + + if 'sformat' not in kwargs: + kwargs['sformat'] = '%(message)s' + self.add_handler(stream_handler, **kwargs)
+ +
[docs] def add_file_handler(self, filename, **kwargs): + """Add a file handler to the logger. + + `Args:` + filename: str + The name of the file where log messages should be saved. + """ + file_handler = logging.FileHandler(filename) + if 'level' not in kwargs: + kwargs['level'] = 'DEBUG' + + if 'sformat' not in kwargs: + kwargs['sformat'] = \ + '%(asctime)s:%(name)s:%(levelname)s:%(message)s' + self.add_handler(file_handler, **kwargs)
+ +
[docs] def add_slack_handler(self, token, channel, **kwargs): + """Add a slack handler to the logger. + + `Args:` + token: str + The API token for a slack app that has chat.post scope + permissions. + channel: str + The *name* of the channel where message will be sent. + """ + slack_handler = SlackerLogHandler(token, channel) + if 'level' not in kwargs: + kwargs['level'] = 'ERROR' + self.add_handler(slack_handler, **kwargs)
+ +
[docs] def add_default_handlers(self): + """Add a set of predefined handlers to the logger. + + Adds a stream handler that send to ``sys.out`` and a file handler that + saves to ``<name>.log``. + """ + # Log to the console + self.add_stream_handler(sys.stdout) + + # Log to a log file + self.add_file_handler(f'{self.name}.log')
+ + def set_level(self, level): + self.logger.setLevel(LOG_LEVELS[level]) + self.level = self.logger.level + +
[docs] def debug(self, msg, *args, **kwargs): + """Log a debug message. + + `Args:` + msg: str + The message to log. + """ + self.logger.debug(msg, *args, **kwargs)
+ +
[docs] def info(self, msg, *args, **kwargs): + """Log an info message. + + `Args:` + msg: str + The message to log. + """ + self.logger.info(msg, *args, **kwargs)
+ +
[docs] def warning(self, msg, *args, **kwargs): + """Log a warning message. + + `Args:` + msg: str + The message to log. + """ + self.logger.warning(msg, *args, **kwargs)
+ +
[docs] def error(self, msg, *args, **kwargs): + """Log a error message. + + `Args:` + msg: str + The message to log. + """ + self.logger.error(msg, *args, **kwargs)
+ +
[docs] def critical(self, msg, *args, **kwargs): + """Log a critical message. + + `Args:` + msg: str + The message to log. + """ + self.logger.critical(msg, *args, **kwargs)
+ +
[docs] def exception(self, msg, *args, **kwargs): + """Log an exception message. + + `Args:` + msg: str + The message to log. + """ + self.logger.exception(msg, *args, **kwargs)
diff --git a/docs/html/_modules/parsons/mobile_commons/campaigns.html b/docs/html/_modules/parsons/mobile_commons/campaigns.html
new file mode 100644
index 0000000000..88627f48d6
--- /dev/null
+++ b/docs/html/_modules/parsons/mobile_commons/campaigns.html
@@ -0,0 +1,288 @@
+ parsons.mobile_commons.campaigns — Parsons 0.1 documentation

Source code for parsons.mobile_commons.campaigns

+"""Mobile Commons Campaings Endpoints."""
+
+
+
[docs]class Campaigns(object): + """Class for campaigns endpoints.""" + + def __init__(self, mc_connection): + """Initialize the Campaigns class. + + `Args:` + mc_connection: MobileCommonsConnector + The connector to access the Mobile Commons API. + """ + self.connection = mc_connection +
[docs] def campaigns(self, include_opt_in_paths=0, sort='asc', status=None, + campaign_id=None): + """Return a list of campaigns. + + `Args:` + include_opt_in_paths: int + Set to 1 to include all opt-in path details. Default is 0. + sort: str + Set to `asc` or `desc` to sort by campaign ID ascending or + descending. Default is ascending. + status: str + Set to active or ended to filter results. Default is empty and + returns all campaigns. + campaign_id: str + Provide a specific campaign ID to view a single result; an + invalid campaign ID will return all campaigns. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'campaigns' + + args = {'include_opt_in_paths': include_opt_in_paths, + 'sort': sort, + 'status': status, + 'campaign_id': campaign_id} + + response = self.connection.request(url, args=args, resp_type='xml') + + if response['response']['success'] == 'true': + return self.connection.output( + response['response']['campaigns']['campaign']) + else: + return None
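A hedged sketch (assumes ``conn`` is an already-initialized MobileCommonsConnector; its setup is not shown here):

    campaigns = Campaigns(conn)
    active = campaigns.campaigns(status='active', include_opt_in_paths=1)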
+ +
[docs] def campaign(self, campaign_id, include_opt_in_paths=0): + """Return a single campaign. + + `Args:` + campaign_id: str + Provide a specific campaign ID to view a single result; an + invalid campaign ID will return all campaigns. + include_opt_in_paths: int + Set to 1 to include all opt-in path details. Default is 0. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'campaigns' + + args = {'include_opt_in_paths': include_opt_in_paths, + 'campaign_id': campaign_id} + + response = self.connection.request(url, args=args, resp_type='xml') + + if response['response']['success'] == 'true': + return self.connection.output( + response['response']['campaigns']['campaign']) + else: + return None
diff --git a/docs/html/_modules/parsons/mobile_commons/groups.html b/docs/html/_modules/parsons/mobile_commons/groups.html
new file mode 100644
index 0000000000..32ef263788
--- /dev/null
+++ b/docs/html/_modules/parsons/mobile_commons/groups.html
@@ -0,0 +1,385 @@
+ parsons.mobile_commons.groups — Parsons 0.1 documentation

Source code for parsons.mobile_commons.groups

+"""Mobile Commons Groups Endpoints."""
+
+
+
[docs]class Groups(object): + """Class for groups endpoints.""" + + def __init__(self, mc_connection): + """Initialize the Groups class. + + `Args:` + mc_connection: MobileCommonsConnector + The connector to access the Mobile Commons API. + """ + self.connection = mc_connection + +
[docs] def groups(self): + """Return a list of groups. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'groups' + + response = self.connection.request(url, resp_type='xml') + + if response['response']['success'] == 'true': + if response['response']['groups']: + return self.connection.output( + response['response']['groups']['group']) + else: + return None + else: + return None
+ +
[docs] def group_members(self, group_id, limit=None, page=None, from_date=None, + to_date=None): + """Return a list of members in a group. + + `Args:` + group_id: int + Required; The primary key of the group. + limit: int + Optional; Limits the number of returned profiles. Maximum of + 1000. + page: int + Optional; Specifies which page, of the total number of pages of + results, to return. + from_date: str + Optional; Limits the returned profiles to ones updated after or + on this date time. ISO-8601 format. + to_date: str + Optional; Limits the returned profiles to ones updated before + or on this date time. ISO-8601 format. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'group_members' + + args = {'group_id': group_id, + 'limit': limit, + 'page': page, + 'from': from_date, + 'to': to_date} + + # TODO check with MC about pagination + + response = self.connection.request(url, args=args, resp_type='xml') + + if response['response']['success'] == 'true': + if response['response']['group']: + return self.connection.output( + response['response']['group']['profile']) + else: + return None + else: + return None
+ +
[docs] def group_create(self, name): + """Create a group. + + `Args:` + name: str + Required; The name for the new group. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'create_group' + + args = {'name': name} + + response = self.connection.request(url, args=args, resp_type='xml') + + if response['response']['success'] == 'true': + if response['response']['group']: + return self.connection.output(response['response']['group']) + else: + return None + else: + return None
+ +
[docs] def group_add_members(self, group_id, phone_numbers): + """Add a list of members to a group. + + `Args:` + group_id: int + Required; The primary key of the group. + phone_numbers: list + Required; A list of phone numbers to add to the group. + If the phone numbers don't exist, they will be created as + new profiles. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'add_group_member' + + if len(phone_numbers) == 0: + raise ValueError("At least 1 phone number is required.") + + if phone_numbers: + phone_number = ','.join(phone_numbers) + + args = {'group_id': group_id, + 'phone_number': phone_number} + + response = self.connection.request(url, args=args, resp_type='xml') + + if response['response']['success'] == 'true': + if response['response']['group']: + return self.connection.output(response['response']['group']) + else: + return None + else: + return None
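A hedged sketch (assumes ``conn`` is an already-initialized MobileCommonsConnector; the group_id and phone numbers are made up):

    groups = Groups(conn)
    groups.group_add_members(group_id=123,
                             phone_numbers=['15555550100', '15555550101'])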
+ +
[docs] def group_remove_members(self, group_id, phone_number): + """Remove a list of members from a group. + + `Args:` + group_id: int + Required; The primary key of the group. + phone_number: list + Required; A list of phone numbers to remove from the group. + If the phone number is not a member of the group, it will + still return the group. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'remove_group_member' + + if len(phone_number) == 0: + raise ValueError("At least 1 phone number is required.") + + if phone_number: + phone_number = ','.join(phone_number) + + args = {'group_id': group_id, + 'phone_number': phone_number} + + response = self.connection.request(url, args=args, resp_type='xml') + + if response['response']['success'] == 'true': + if response['response']['group']: + return self.connection.output(response['response']['group']) + else: + return None + else: + return None
diff --git a/docs/html/_modules/parsons/mobile_commons/profiles.html b/docs/html/_modules/parsons/mobile_commons/profiles.html
new file mode 100644
index 0000000000..90e9b45315
--- /dev/null
+++ b/docs/html/_modules/parsons/mobile_commons/profiles.html
@@ -0,0 +1,412 @@
+ parsons.mobile_commons.profiles — Parsons 0.1 documentation

Source code for parsons.mobile_commons.profiles

+"""Mobile Commons Profiles Endpoints."""
+
+
+
[docs]class Profiles(object): + """Class for profiles endpoints.""" + + def __init__(self, mc_connection): + """Initialize the Profiles class. + + `Args:` + mc_connection: MobileCommonsConnector + The connector to access the Mobile Commons API. + """ + self.connection = mc_connection + +
[docs] def profiles(self, phone_number=None, from_date=None, to_date=None, + limit=None, page=None, include_custom_columns=None, + include_subscriptions=None, include_clicks=None, + include_members=None): + """Return a list of profiles. + + `Args:` + phone_number: list + Optional; Limits the returned profiles matching the provided + phone numbers. Phone numbers should be specified with country + code. + from_date: str + Optional; Limits the returned profiles to ones updated after + or on this date time. ISO-8601 format. + to_date: str + Optional; Limits the returned profiles to ones updated before + or on this date time. ISO-8601 format. + limit: int + Optional; Limits the number of returned profiles. + Maximum of 1000. + page: int + Optional; Specifies which page, of the total number of pages + of results, to return. + include_custom_columns: boolean + Optional; default 'true' - allows exclusion of custom + columns associated with profiles, pass 'false' to limit + include_subscriptions: boolean + Optional; default 'true' - allows exclusion of + subscriptions for each profile, pass 'false' to limit + include_clicks: boolean + Optional; default 'true' - allows exclusion of clicks + include_members: boolean + Optional; default 'true' - allows exclusion of + profile member records maintained for integrations + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'profiles' + + if phone_number: + phone_number = ','.join(phone_number) + + args = {'phone_number': phone_number, + 'from': from_date, + 'to': to_date, + 'limit': limit, + 'page': page, + 'include_custom_columns': include_custom_columns, + 'include_subscriptions': include_subscriptions, + 'include_clicks': include_clicks, + 'include_members': include_members} + + response = self.connection.request_paginate( + url, 'profiles', args=args, resp_type='xml') + + if response: + return self.connection.output(response) + else: + return None
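A hedged sketch (assumes ``conn`` is an already-initialized MobileCommonsConnector; the date is illustrative):

    profiles = Profiles(conn)
    recent = profiles.profiles(from_date='2019-01-01T00:00:00Z', limit=1000)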
+ +
[docs] def profile_get(self, phone_number, company=None, include_messages=False, + include_donations=False): + """Return a single profile record. + + `Args:` + phone_number: str + Required; The phone number for the profile to return. + company: str + Optional; The company to query, if different than the one + specified for the connection. Defaults to the connection's + company. + include_messages: boolean + Optional; Set to true to include associated text messages. + Default is false. + include_donations: boolean + Optional; Set to true to include associated mobile giving + donations, if any. Default is false. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'profile' + + args = {'phone_number': phone_number, + 'company': company, + 'include_messages': include_messages, + 'include_donations': include_donations} + + response = self.connection.request(url, args=args, resp_type='xml') + + if response['response']['success'] == 'true': + return self.connection.output(response['response']['profile']) + else: + return None
+ +
[docs] def profile_update(self, phone_number, email=None, postal_code=None, + first_name=None, last_name=None, street1=None, + street2=None, city=None, state=None, country=None, + custom_fields=None, opt_in_path_id=None): + """Create or update a profile. + + `Args:` + phone_number: str + Required; The phone number for the profile to update. + email: str + Optional; New email for the profile. + postal_code: str + Optional; New postal code for the profile. + first_name: str + Optional; New first name for the profile. + last_name: str + Optional; New last name for the profile. + street1: str + Optional; New street1 for the profile. + street2: str + Optional; New street2 for the profile. + city: str + Optional; New city for the profile. + state: str + Optional; New state for the profile. + country: str + Optional; New country for the profile. + custom_fields: dict + Optional; A dict of custom fields and their new values for the + profile. + opt_in_path_id: str + Optional; New opt_in_path_id for the profile. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'profile_update' + + post_data = { + 'phone_number': phone_number, + 'email': email, + 'postal_code': postal_code, + 'first_name': first_name, + 'last_name': last_name, + 'street1': street1, + 'street2': street2, + 'city': city, + 'state': state, + 'country': country, + 'custom_fields': custom_fields, + 'opt_in_path_id': opt_in_path_id} + + response = self.connection.request( + url, req_type='POST', post_data=post_data, resp_type='xml') + + if response['response']['success'] == 'true': + return self.connection.output(response['response']['profile']) + else: + return None
+ +
[docs] def profile_opt_out(self, phone_number, campaign_id=None, + subscription_id=None): + """Opt out a profile from a campaign, subscription or all. + + `Args:` + phone_number: str + Required; The phone number for the profile to opt out. + campaign_id: int + Optional; Opt-out this campaign only. Default is all campaigns. + subscription_id:int + Optional; Opt-out this subscription only. Default is all + subscriptions. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + url = self.connection.uri + 'profile_opt_out' + + post_data = {'phone_number': phone_number, + 'campaign_id': campaign_id, + 'subscription_id': subscription_id} + + response = self.connection.request( + url, req_type='POST', post_data=post_data, resp_type='xml') + + if response['response']['success'] == 'true': + return self.connection.output(response['response']['profile']) + else: + return None
diff --git a/docs/html/_modules/parsons/mobilize_america/ma.html b/docs/html/_modules/parsons/mobilize_america/ma.html
new file mode 100644
index 0000000000..82c02a4072
--- /dev/null
+++ b/docs/html/_modules/parsons/mobilize_america/ma.html
@@ -0,0 +1,572 @@
+ parsons.mobilize_america.ma — Parsons 0.5 documentation

Source code for parsons.mobilize_america.ma

+from requests import request as _request
+from parsons.etl.table import Table
+from parsons.utilities.datetime import date_to_timestamp
+import petl
+import re
+import os
+import logging
+
+logger = logging.getLogger(__name__)
+
+MA_URI = 'http://events.mobilizeamerica.io/api/v1/'
+
+
+
[docs]class MobilizeAmerica(object): + """ + Instantiate MobilizeAmerica Class + + `Args:` + api_key: str + An api key issued by Mobilize America. This is required to access some private methods. + + `Returns:` + MobilizeAmerica Class + """ + + def __init__(self, api_key=None): + + self.uri = MA_URI + self.api_key = api_key or os.environ.get('MOBILIZE_AMERICA_API_KEY') + + if not self.api_key: + logger.info('Mobilize America API Key missing. Calling methods that rely on private' + ' endpoints will fail.') + + def _request(self, url, req_type='GET', post_data=None, args=None, auth=False): + if auth: + + if not self.api_key: + raise TypeError('This method requires an api key.') + else: + header = {'Authorization': 'Bearer ' + self.api_key} + + else: + header = None + + r = _request(req_type, url, json=post_data, params=args, headers=header) + + if 'error' in r.json(): + raise ValueError('API Error: ' + str(r.json()['error'])) + + return r + + def _request_paginate(self, url, req_type='GET', args=None, auth=False): + + r = self._request(url, req_type=req_type, args=args, auth=auth) + + json = r.json()['data'] + + while r.json()['next']: + + r = self._request(r.json()['next'], req_type=req_type) + json.extend(r.json()['data']) + + return json + + def _time_parse(self, time_arg): + # Parse the date filters + + trans = [('>=', 'gte_'), + ('>', 'gt_'), + ('<=', 'lte_'), + ('<', 'lt_')] + + if time_arg: + + time = re.sub('<=|<|>=|>', '', time_arg) + time = date_to_timestamp(time) + time_filter = re.search('<=|<|>=|>', time_arg).group() + + for i in trans: + if time_filter == i[0]: + return i[1] + str(time) + + raise ValueError('Invalid time operator. Must be one of >=, >, <= or <.') + + return time_arg +
[docs] def get_organizations(self, updated_since=None): + """ + Return all active organizations on the platform. + + `Args:` + updated_since: str + Filter to organizations updated since given date (ISO Date) + `Returns` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + return Table(self._request_paginate(self.uri + 'organizations', + args={ + 'updated_since': date_to_timestamp(updated_since) + }))
+ +
[docs] def get_events(self, organization_id=None, updated_since=None, timeslot_start=None, + timeslot_end=None, timeslots_table=False, max_timeslots=None): + """ + Fetch all public events on the platform. + + `Args:` + organization_id: list or int + Filter events by a single or multiple organization ids + updated_since: str + Filter to events updated since given date (ISO Date) + timeslot_start: str + Filter by a timeslot start of events using ``>``,``>=``,``<``,``<=`` + operators and ISO date (ex. ``<=2018-12-13 05:00:00PM``) + timeslot_end: str + Filter by a timeslot end of events using ``>``,``>=``,``<``,``<=`` + operators and ISO date (ex. ``<=2018-12-13 05:00:00PM``) + timeslots_table: boolean + Return timeslots as a separate long table. Useful for extracting + to databases. + max_timeslots: int + If not returning a timeslot table, will unpack time slots. If you do not + set this kwarg, it will add a column for each time slot. The argument + limits the number of columns and discards any additional timeslots + after that. + + For example: If there are 20 timeslots associated with your event, + and you set the max time slots to 5, it will only return the first 5 + time slots as ``time_slot_0``, ``time_slot_1`` etc. + + This is helpful in situations where you have a regular sync + running and want to ensure that the column headers remain static. + + `Returns` + Parsons Table or dict of Parsons Tables + See :ref:`parsons-table` for output options. + """ + + if isinstance(organization_id, (str, int)): + organization_id = [organization_id] + + args = {'organization_id': organization_id, + 'updated_since': date_to_timestamp(updated_since), + 'timeslot_start': self._time_parse(timeslot_start), + 'timeslot_end': self._time_parse(timeslot_end)} + + tbl = Table(self._request_paginate(self.uri + 'events', args=args)) + + if tbl.num_rows > 0: + + tbl.unpack_dict('sponsor') + tbl.unpack_dict('location', prepend=False) + tbl.unpack_dict('location', prepend=False) # Intentional duplicate + tbl.table = petl.convert(tbl.table, 'address_lines', lambda v: ' '.join(v)) + + if timeslots_table: + + timeslots_tbl = tbl.long_table(['id'], 'timeslots', 'event_id') + return {'events': tbl, 'timeslots': timeslots_tbl} + + else: + tbl.unpack_list('timeslots', replace=True, max_columns=max_timeslots) + cols = tbl.columns + for c in cols: + if re.search('timeslots', c, re.IGNORECASE) is not None: + tbl.unpack_dict(c) + + return tbl
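For example (the date is illustrative; no API key is needed for this public endpoint):

    from parsons import MobilizeAmerica

    ma = MobilizeAmerica()
    res = ma.get_events(timeslot_start='>=2019-06-01', timeslots_table=True)
    events, timeslots = res['events'], res['timeslots']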
+ +
[docs] def get_events_organization(self, organization_id=None, updated_since=None, timeslot_start=None, + timeslot_end=None, timeslots_table=False, max_timeslots=None): + """ + Fetch all public events for an organization. This includes both events owned + by the organization (as indicated by the organization field on the event object) + and events of other organizations promoted by this specified organization. + + .. note:: + API Key Required + + `Args:` + organization_id: list or int + Filter events by a single or multiple organization ids + updated_since: str + Filter to events updated since given date (ISO Date) + timeslot_start: str + Filter by a timeslot start of events using ``>``,``>=``,``<``,``<=`` + operators and ISO date (ex. ``<=2018-12-13 05:00:00PM``) + timeslot_end: str + Filter by a timeslot end of events using ``>``,``>=``,``<``,``<=`` + operators and ISO date (ex. ``<=2018-12-13 05:00:00PM``) + timeslots_table: boolean + Return timeslots as a separate long table. Useful for extracting + to databases. + zipcode: str + Filter by a Events' Locations' postal code. If present, returns Events + sorted by distance from zipcode. If present, virtual events will not be returned. + max_dist: str + Filter Events' Locations' distance from provided zipcode. + visibility: str + Either `PUBLIC` or `PRIVATE`. Private events only return if user is authenticated; + if `visibility=PRIVATE` and user doesn't have permission, no events returned. + exclude_full: bool + If `exclude_full=true`, filter out full Timeslots (and Events if all of an Event's + Timeslots are full) + is_virtual: bool + `is_virtual=false` will return only in-person events, while `is_virtual=true` will + return only virtual events. If excluded, return virtual and in-person events. Note + that providing a zipcode also implies `is_virtual=false`. + event_types:enum + The type of the event, one of: `CANVASS`, `PHONE_BANK`, `TEXT_BANK`, `MEETING`, + `COMMUNITY`, `FUNDRAISER`, `MEET_GREET`, `HOUSE_PARTY`, `VOTER_REG`, `TRAINING`, + `FRIEND_TO_FRIEND_OUTREACH`, `DEBATE_WATCH_PARTY`, `ADVOCACY_CALL`, `OTHER`. + This list may expand in the future. + max_timeslots: int + If not returning a timeslot table, will unpack time slots. If you do not + set this arg, it will add a column for each time slot. The argument + limits the number of columns and discards any additional timeslots + after that. + + For example: If there are 20 timeslots associated with your event, + and you set the max time slots to 5, it will only return the first 5 + time slots as ``time_slot_0``, ``time_slot_1`` etc. + + This is helpful in situations where you have a regular sync + running and want to ensure that the column headers remain static. + + `Returns` + Parsons Table or dict of Parsons Tables + See :ref:`parsons-table` for output options. 
+ """ + + if isinstance(organization_id, (str, int)): + organization_id = [organization_id] + + args = {'organization_id': organization_id, + 'updated_since': date_to_timestamp(updated_since), + 'timeslot_start': self._time_parse(timeslot_start), + 'timeslot_end': self._time_parse(timeslot_end), + } + + tbl = Table(self._request_paginate(self.uri + 'events', args=args, auth=True)) + + if tbl.num_rows > 0: + + tbl.unpack_dict('sponsor') + tbl.unpack_dict('location', prepend=False) + tbl.unpack_dict('location', prepend=False) # Intentional duplicate + tbl.table = petl.convert(tbl.table, 'address_lines', lambda v: ' '.join(v)) + + if timeslots_table: + + timeslots_tbl = tbl.long_table(['id'], 'timeslots', 'event_id') + return {'events': tbl, 'timeslots': timeslots_tbl} + + else: + tbl.unpack_list('timeslots', replace=True, max_columns=max_timeslots) + cols = tbl.columns + for c in cols: + if re.search('timeslots', c, re.IGNORECASE) is not None: + tbl.unpack_dict(c) + + return tbl
+ +
+    def get_events_deleted(self, organization_id=None, updated_since=None):
+        """
+        Fetch deleted public events on the platform.
+
+        `Args:`
+            organization_id: list or int
+                Filter events by a single or multiple organization ids
+            updated_since: str
+                Filter to events updated since given date (ISO Date)
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        if isinstance(organization_id, (str, int)):
+            organization_id = [organization_id]
+
+        args = {'organization_id': organization_id,
+                'updated_since': date_to_timestamp(updated_since)}
+
+        return Table(self._request_paginate(self.uri + 'events/deleted', args=args))
+ +
+    def get_people(self, organization_id=None, updated_since=None):
+        """
+        Fetch all people (volunteers) who are affiliated with the organization.
+
+        .. note::
+            API Key Required
+
+        `Args:`
+            organization_id: int
+                The organization id whose people to fetch
+            updated_since: str
+                Filter to people updated since given date (ISO Date)
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        url = self.uri + 'organizations/' + str(organization_id) + '/people'
+        args = {'updated_since': date_to_timestamp(updated_since)}
+        return Table(self._request_paginate(url, args=args, auth=True))
+ +
+    def get_attendances(self, organization_id=None, updated_since=None):
+        """
+        Fetch all attendances which were either promoted by the organization or
+        were for events owned by the organization.
+
+        .. note::
+            API Key Required
+
+        `Args:`
+            organization_id: int
+                The organization id whose attendances to fetch
+            updated_since: str
+                Filter to attendances updated since given date (ISO Date)
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        url = self.uri + 'organizations/' + str(organization_id) + '/attendances'
+        args = {'updated_since': date_to_timestamp(updated_since)}
+        return Table(self._request_paginate(url, args=args, auth=True))
diff --git a/docs/html/_modules/parsons/ngpvan/activist_codes.html b/docs/html/_modules/parsons/ngpvan/activist_codes.html
new file mode 100644
index 0000000000..fcc99a695f
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/activist_codes.html
@@ -0,0 +1,341 @@
+parsons.ngpvan.activist_codes — Parsons 0.5 documentation
Source code for parsons.ngpvan.activist_codes

+"""NGPVAN Activist Code Endpoints"""
+
+from parsons.etl.table import Table
+from parsons.ngpvan.utilities import action_parse
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class ActivistCodes(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_activist_codes(self): + """ + Get activist codes. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('activistCodes')) + logger.info(f'Found {tbl.num_rows} activist codes.') + return tbl
+ +
[docs] def get_activist_code(self, activist_code_id): + """ + Get an activist code. + + `Args:` + activist_code_id : int + The activist code id. + `Returns:` + dict + The activist code + """ + + r = self.connection.get_request(f'activistCodes/{activist_code_id}') + logger.info(f'Found activist code {activist_code_id}.') + return r
+ + def toggle_activist_code(self, id, activist_code_id, action, id_type='vanid'): + # Internal method to apply/remove activist codes. Was previously a public method, + # but for the sake of simplicity, breaking out into two public methods. + + response = {"activistCodeId": activist_code_id, + "action": action_parse(action), + "type": "activistCode"} + + r = self.apply_response(id, response, id_type) + + logger.info(f'{id_type.upper()} {id} {action.capitalize()} ' + + f'activist code {activist_code_id}') + + return r + +
+    def apply_activist_code(self, id, activist_code_id, id_type='vanid'):
+        """
+        Apply an activist code to a person.
+
+        `Args:`
+            id: str
+                A valid person id
+            activist_code_id: int
+                A valid activist code id
+            id_type: str
+                A known person identifier type available on this VAN instance
+                such as ``dwid``
+        Returns:
+            ``None``
+        """
+
+        return self.toggle_activist_code(id, activist_code_id, 'Apply', id_type=id_type)
+ +
+    def remove_activist_code(self, id, activist_code_id, id_type='vanid'):
+        """
+        Remove an activist code from a person.
+
+        `Args:`
+            id: str
+                A valid person id
+            activist_code_id: int
+                A valid activist code id
+            id_type: str
+                A known person identifier type available on this VAN instance
+                such as ``dwid``
+        Returns:
+            ``None``
+        """
+
+        return self.toggle_activist_code(id, activist_code_id, 'Remove', id_type=id_type)
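A short sketch of the apply/remove pair above, accessed through the top-level `parsons.VAN` class that these endpoint classes are composed into (the ids and database name are placeholders; the API key is assumed to come from the environment):

```python
from parsons import VAN

van = VAN(db='MyVoters')  # placeholder db; API key read from env

# Apply an activist code to a person by vanid, then remove it
van.apply_activist_code(id='19722445', activist_code_id=1122)
van.remove_activist_code(id='19722445', activist_code_id=1122)
```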
diff --git a/docs/html/_modules/parsons/ngpvan/canvass_responses.html b/docs/html/_modules/parsons/ngpvan/canvass_responses.html
new file mode 100644
index 0000000000..0135b8ed2c
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/canvass_responses.html
@@ -0,0 +1,295 @@
+parsons.ngpvan.canvass_responses — Parsons 0.5 documentation
Source code for parsons.ngpvan.canvass_responses

+"""NGPVAN Canvass Responses Endpoints"""
+
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class CanvassResponses(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_canvass_responses_contact_types(self): + """ + Get canvass response contact types. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('canvassResponses/contactTypes')) + logger.info(f'Found {tbl.num_rows} canvass response contact types.') + return tbl
+ +
[docs] def get_canvass_responses_input_types(self): + """ + Get canvass response input types. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('canvassResponses/inputTypes')) + logger.info(f'Found {tbl.num_rows} canvass response input types.') + return tbl
+ +
[docs] def get_canvass_responses_result_codes(self): + """ + Get canvass response result codes. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('canvassResponses/resultCodes')) + logger.info(f'Found {tbl.num_rows} canvass response result codes.') + return tbl
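These three lookups are typically pulled once, since their ids feed methods like `apply_canvass_result`. A minimal sketch, assuming the same top-level `VAN` setup as elsewhere in these docs:

```python
from parsons import VAN

van = VAN(db='MyVoters')  # assumed setup

contact_types = van.get_canvass_responses_contact_types()
input_types = van.get_canvass_responses_input_types()
result_codes = van.get_canvass_responses_result_codes()

# e.g. inspect result_codes to find the id for "Not Home"
print(result_codes)
```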
diff --git a/docs/html/_modules/parsons/ngpvan/codes.html b/docs/html/_modules/parsons/ngpvan/codes.html
new file mode 100644
index 0000000000..03d9591619
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/codes.html
@@ -0,0 +1,454 @@
+parsons.ngpvan.codes — Parsons 0.5 documentation
Source code for parsons.ngpvan.codes

+"""NGPVAN Code Endpoints"""
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Codes(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_codes(self, name=None, supported_entities=None, parent_code_id=None, + code_type=None): + """ + Get codes. + + `Args:` + name : str + Filter by name of code. + supported_entities: str + Filter by supported entities. + parent_code_id: str + Filter by parent code id. + code_type: str + Filter by code type. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + params = {'name': name, + 'supportedEntities': supported_entities, + 'parentCodeId': parent_code_id, + 'codeType': code_type, + '$top': 200 + } + + tbl = Table(self.connection.get_request('codes', params=params)) + logger.info(f'Found {tbl.num_rows} codes.') + return tbl
+ +
[docs] def get_code(self, code_id): + """ + Get a code. + + `Args:` + code_id : int + The code id. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + c = self.connection.request(f'codes/{code_id}') + logger.debug(c) + logger.info(f'Found code {code_id}.') + return c
+ +
[docs] def get_code_types(self): + """ + Get code types. + + `Returns:` + list + A list of code types. + """ + + lst = self.connection.get_request('codeTypes') + logger.info(f'Found {len(lst)} code types.') + return lst
+ +
[docs] def create_code(self, name=None, parent_code_id=None, description=None, + code_type='SourceCode', supported_entities=None): + """ + Create a code. + + `Args:` + name: str + The name of the code. + parent_code_id: int + A unique identifier for this code’s parent. + description: str + A description for this code, no longer than 200 characters. + code_type: str + The code type. ``Tag`` and ``SourceCode`` are valid values. + supported_entities: list + A list of dicts that enumerate the searchability and applicability rules of the + code. You can find supported entities with the :meth:`code_supported_entities` + + .. highlight:: python + .. code-block:: python + + [ + { + 'name': 'Event', + 'is_searchable': True, + 'is_applicable': True + } + { + 'name': 'Locations', + 'start_time': '12-31-2018T13:00:00', + 'end_time': '12-31-2018T14:00:00' + } + ] + """ + + json = {"parentCodeId": parent_code_id, + "name": name, + "codeType": code_type, + "description": description} + + if supported_entities: + + se = [{'name': s['name'], + 'isSearchable': s['is_searchable'], + 'is_applicable': s['is_applicable']} for s in supported_entities] + + json['supportedEntities'] = se + + r = self.connection.post_request('codes', json=json) + logger.info(f'Code {r} created.') + return r
+ +
[docs] def update_code(self, code_id, name=None, parent_code_id=None, description=None, + code_type='SourceCode', supported_entities=None): + """ + Update a code. + + `Args:` + code_id: int + The code id. + name: str + The name of the code. + parent_code_id: int + A unique identifier for this code’s parent. + description: str + A description for this code, no longer than 200 characters. + code_type: str + The code type. ``Tag`` and ``SourceCode`` are valid values. + supported_entities: list + A list of dicts that enumerate the searchability and applicability rules of the + code. You can find supported entities with the :meth:`code_supported_entities` + + .. highlight:: python + .. code-block:: python + + [ + { + 'name': 'Event', + 'is_searchable': True, + 'is_applicable': True + } + { + 'name': 'Locations', + 'start_time': '12-31-2018T13:00:00', + 'end_time': '12-31-2018T14:00:00' + } + ] + """ + + post_data = {} + + if name: + post_data['name'] = name + if parent_code_id: + post_data['parentCodeId'] = parent_code_id + if code_type: + post_data['codeType'] = code_type + if description: + post_data['description'] = description + + if supported_entities: + + se = [{'name': s['name'], + 'isSearchable': s['is_searchable'], + 'is_applicable': s['is_applicable']} for s in supported_entities] + post_data['supportedEntities'] = se + + r = self.connection.put_request(f'codes/{code_id}', json=post_data) + logger.info(f'Code {code_id} updated.') + return r
+ +
[docs] def delete_code(self, code_id): + """ + Delete a code. + + `Args:` + code_id: int + The code id. + `Returns:` + ``None`` + """ + + r = self.connection.delete_request(f'codes/{code_id}') + logger.info(f'Code {code_id} deleted.') + return r
+ +
[docs] def get_code_supported_entities(self): + """ + Get code supported entities. + + `Returns:` + list + A list of code supported entities. + """ + + lst = self.connection.get_request('codes/supportedEntities') + logger.info(f'Found {len(lst)} code supported entities.') + return lst
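A sketch of the code lifecycle using the methods above (assumed `VAN` setup; the id returned by `create_code` is fed back into the update and delete calls):

```python
from parsons import VAN

van = VAN(db='EveryAction')  # assumed database type

code_id = van.create_code(name='Fall Canvass',
                          code_type='SourceCode',
                          description='Created via the API')
van.update_code(code_id, name='Fall Canvass 2019')
van.delete_code(code_id)
```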
diff --git a/docs/html/_modules/parsons/ngpvan/events.html b/docs/html/_modules/parsons/ngpvan/events.html
new file mode 100644
index 0000000000..075510c662
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/events.html
@@ -0,0 +1,479 @@
+parsons.ngpvan.events — Parsons 0.5 documentation
Source code for parsons.ngpvan.events

+"""NGPVAN Events Endpoints"""
+
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Events(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_events(self, code_ids=None, event_type_ids=None, rep_event_id=None, + starting_after=None, starting_before=None, district_field=None, + expand_fields=['locations', 'codes', 'shifts', 'roles', 'notes', + 'financialProgram', 'ticketCategories', + 'onlineForms']): + """ + Get events. + + `Args:` + code_ids: str + Filter by code id. + event_type_ids: str + Filter by event_type_ids. + rep_event_id: str + Filters to recurring events that are recurrences the passed event id. + starting_after: str + Events beginning after ``iso8601`` formatted date. + starting_before: str + Events beginning before ``iso8601`` formatted date. + district_field: str + Filter by district field. + expand_fields: list + A list of fields for which to include data. If a field is omitted, + ``None`` will be returned for that field. Can be ``locations``, ``codes``, + ``shifts``,``roles``, ``notes``, ``financialProgram``, ``ticketCategories``, + ``onlineForms``. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + if expand_fields: + expand_fields = ','.join(expand_fields) + + params = {'codeIds': code_ids, + 'eventTypeIds': event_type_ids, + 'inRepetitionWithEventId': rep_event_id, + 'startingAfter': starting_after, + 'startingBefore': starting_before, + 'districtFieldValue': district_field, + '$top': 50, + '$expand': expand_fields + } + + tbl = Table(self.connection.get_request('events', params=params)) + logger.info(f'Found {tbl.num_rows} events.') + return tbl
+ +
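A sketch of filtering events while trimming the `$expand` payload (assumed `VAN` setup); requesting only the expansions you need keeps responses small, since omitted fields come back as `None`:

```python
from parsons import VAN

van = VAN(db='EveryAction')  # assumed setup

events = van.get_events(starting_after='2019-01-01',
                        expand_fields=['locations', 'shifts'])
```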
+    def get_event(self, event_id, expand_fields=['locations', 'codes', 'shifts', 'roles',
+                                                 'notes', 'financialProgram',
+                                                 'ticketCategories',
+                                                 'voterRegistrationBatches']):
+        """
+        Get an event.
+
+        `Args:`
+            event_id: int
+                The event id.
+            expand_fields: list
+                A list of fields for which to include data. If a field is omitted,
+                ``None`` will be returned for that field. Can be ``locations``,
+                ``codes``, ``shifts``, ``roles``, ``notes``, ``financialProgram``,
+                ``ticketCategories``, ``voterRegistrationBatches``.
+        `Returns:`
+            dict
+                The event object.
+        """
+
+        if expand_fields:
+            expand_fields = ','.join(expand_fields)
+
+        r = self.connection.get_request(f'events/{event_id}', params={'$expand': expand_fields})
+        logger.info(f'Found event {event_id}.')
+        return r
+ +
+    def create_event(self, name, short_name, start_date, end_date, event_type_id,
+                     roles, shifts=None, description=None, editable=False,
+                     publicly_viewable=False, location_ids=None, code_ids=None, notes=None,
+                     district_field_value=None, voter_registration_batches=None):
+        """
+        Create an event.
+
+        `Args:`
+            name: str
+                A name for this event, no longer than 500 characters.
+            short_name: str
+                A shorter name for this event, no longer than 12 characters.
+            start_date: str
+                The start date and time for this event.
+            end_date: str
+                The end date and time for this event that is after ``start_date``.
+            event_type_id: int
+                A valid event type id.
+            roles: list
+                A list of valid role ids that correspond with the event type.
+            shifts: list
+                A list of dicts with shifts formatted as:
+
+                .. highlight:: python
+                .. code-block:: python
+
+                    [
+                        {
+                         'name': 'Shift 1',
+                         'start_time': '12-31-2018T12:00:00',
+                         'end_time': '12-31-2018T13:00:00'
+                        },
+                        {
+                         'name': 'Shift 2',
+                         'start_time': '12-31-2018T13:00:00',
+                         'end_time': '12-31-2018T14:00:00'
+                        }
+                    ]
+
+            description: str
+                An optional description for this event, no longer than 500 characters.
+            editable: boolean
+                If ``True``, prevents modification of this event by any users other
+                than the user associated with the API key. Setting this to true
+                effectively makes the event read-only in the VAN interface.
+            publicly_viewable: boolean
+                Used by NGP VAN's website platform to indicate whether this event can
+                be viewed publicly.
+            location_ids: list
+                A list of location ids where the event is taking place.
+            code_ids: list
+                A list of codes that are applied to this event for organizational
+                purposes. Note that at most one source code, and any number of tags,
+                may be applied to an event.
+            notes: list
+                A list of notes.
+        `Returns:`
+            int
+                The event id.
+        """
+
+        if shifts is None:
+            shifts = [{'name': 'Default Shift',
+                       'startTime': start_date,
+                       'endTime': end_date}]
+        else:
+            shifts = [{'name': s['name'],
+                       'startTime': s['start_time'],
+                       'endTime': s['end_time']} for s in shifts]
+
+        event = {'name': name,
+                 'shortName': short_name,
+                 'description': description,
+                 'startDate': start_date,
+                 'endDate': end_date,
+                 'eventType': {'eventTypeId': event_type_id},
+                 'isOnlyEditableByCreatingUser': str(editable).lower(),
+                 'isPubliclyViewable': publicly_viewable,
+                 'notes': notes,
+                 'shifts': shifts,
+                 'roles': [{'roleId': r} for r in roles],
+                 'districtFieldValue': district_field_value,
+                 'voterRegistrationBatches': voter_registration_batches
+                 }
+
+        if location_ids:
+            event['locations'] = [{'locationId': loc} for loc in location_ids]
+
+        if code_ids:
+            event['codes'] = [{'codeID': c} for c in code_ids]
+
+        r = self.connection.post_request('events', json=event)
+        logger.info(f'Event {r} created.')
+        return r
+ +
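A sketch of `create_event` with a single explicit shift (assumed `VAN` setup; the event type and role ids are placeholders that vary by committee):

```python
from parsons import VAN

van = VAN(db='EveryAction')  # assumed setup

event_id = van.create_event(
    name='Canvass Launch', short_name='CanvLaunch',
    start_date='2019-06-01T09:00:00', end_date='2019-06-01T12:00:00',
    event_type_id=296199,  # placeholder event type id
    roles=[259236],        # placeholder role id
    shifts=[{'name': 'Morning',
             'start_time': '2019-06-01T09:00:00',
             'end_time': '2019-06-01T12:00:00'}])
```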
[docs] def delete_event(self, event_id): + """ + Delete an event. + + `Args:` + event_id: int + The event id. + `Returns:` + ``None`` + """ + + r = self.connection.delete_request(f'events/{event_id}') + logger.info(f'Event {event_id} deleted.') + return r
+ +
[docs] def add_event_shift(self, event_id, shift_name, start_time, end_time): + """ + Add shifts to an event + + `Args:` + event_id: int + The event id. + shift_name: str + The name of the shift + start_time: str + The start time for the shift (``iso8601`` formatted date). + end_time: str + The end time of the shift (``iso8601`` formatted date). + `Returns:` + int + The shift id. + """ + + shift = {'name': shift_name, + 'startTime': start_time, + 'endTime': end_time + } + + r = self.connection.post_request(f'events/{event_id}/shifts', json=shift) + logger.info(f'Shift {r} added.') + return r
+ +
[docs] def get_event_types(self): + """ + Get event types. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('events/types')) + logger.info(f'Found {tbl.num_rows} events.') + return tbl
diff --git a/docs/html/_modules/parsons/ngpvan/locations.html b/docs/html/_modules/parsons/ngpvan/locations.html
new file mode 100644
index 0000000000..aad33b3fc6
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/locations.html
@@ -0,0 +1,352 @@
+parsons.ngpvan.locations — Parsons 0.5 documentation
Source code for parsons.ngpvan.locations

+"""NGPVAN Locations Endpoints"""
+
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Locations(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_locations(self, name=None): + """ + Get locations. + + `Args:` + name: str + Filter locations by name. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('locations', params={'name': name})) + logger.info(f'Found {tbl.num_rows} locations.') + return self._unpack_loc(tbl)
+ +
[docs] def get_location(self, location_id): + """ + Get a location. + + `Args:` + location_id: int + The location id. + `Returns:` + dict + """ + + r = self.connection.get_request(f'locations/{location_id}') + logger.info(f'Found location {location_id}.') + return r
+ +
[docs] def create_location(self, name, address_line1=None, address_line2=None, city=None, + state=None, zip_code=None): + """ + Find or create a location. If location already exists, will return location id. + + `Args:` + name: str + A name for this location, no longer than 50 characters. + address_line1: str + First line of a street address. + address_line2: str + Second line of a street address. + city: str + City or town name. + state: str + Two or three character state or province code (e.g., MN, ON, NSW, etc.). + zip_code: str + ZIP, ZIP+4, Postal Code, Post code, etc. + `Returns:` + int + A location id. + """ + + location = {'name': name, + 'address': { + 'addressLine1': address_line1, + 'addressLine2': address_line2, + 'city': city, + 'stateOrProvince': state, + 'zipOrPostalCode': zip_code + }} + + r = self.connection.post_request(f'locations/findOrCreate', json=location) + logger.info(f'Location {r} created.') + return r
+ +
[docs] def delete_location(self, location_id): + """ + Delete a location. + + `Args:` + location_id: int + The location id + `Returns:` + ``None`` + """ + + r = self.connection.delete_request(f'locations/{location_id}') + logger.info(f'Location {location_id} deleted.') + return r
+ + def _unpack_loc(self, table): + # Internal method to unpack location json + + if isinstance(table, tuple): + return table + + if 'address' in table.columns: + table.unpack_dict('address', prepend=False) + + if 'geoLocation' in table.columns: + table.unpack_dict('geoLocation', prepend=False) + + return table
diff --git a/docs/html/_modules/parsons/ngpvan/people.html b/docs/html/_modules/parsons/ngpvan/people.html
new file mode 100644
index 0000000000..ff24e078e1
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/people.html
@@ -0,0 +1,802 @@
+parsons.ngpvan.people — Parsons 0.5 documentation
Source code for parsons.ngpvan.people

+from parsons.utilities import json_format
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class People(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
+    def find_person(self, first_name=None, last_name=None, date_of_birth=None, email=None,
+                    phone=None, phone_type=None, street_number=None, street_name=None,
+                    zip=None):
+        """
+        Find a person record.
+
+        .. note::
+            Person find must include the following minimum combinations to conduct
+            a search.
+
+            - first_name, last_name, email
+            - first_name, last_name, phone
+            - first_name, last_name, zip5, date_of_birth
+            - first_name, last_name, street_number, street_name, zip5
+            - email_address
+
+        `Args:`
+            first_name: str
+                The person's first name
+            last_name: str
+                The person's last name
+            date_of_birth: str
+                ISO 8601 formatted date of birth (e.g. ``1981-02-01``)
+            email: str
+                The person's email address
+            phone: str
+                Phone number of any type (Work, Cell, Home)
+            street_number: str
+                Street number
+            street_name: str
+                Street name
+            zip: str
+                5 digit zip code
+        `Returns:`
+            A person dict object
+        """
+
+        logger.info(f'Finding {first_name} {last_name}.')
+
+        return self._people_search(
+            first_name=first_name,
+            last_name=last_name,
+            date_of_birth=date_of_birth,
+            email=email,
+            phone=phone,
+            phone_type=phone_type,
+            street_number=street_number,
+            street_name=street_name,
+            zip=zip
+        )
+ +
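A sketch of a valid search that satisfies the first_name/last_name/email minimum combination (assumed `VAN` setup; names and email are placeholders):

```python
from parsons import VAN

van = VAN(db='MyVoters')  # assumed setup

person = van.find_person(first_name='Billy',
                         last_name='Burchard',
                         email='billy@example.com')
```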
[docs] def find_person_json(self, match_json): + """ + Find a person record based on json data. + + .. note:: + Person find must include the following minimum combinations to conduct + a search. + + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip5, date_of_birth + - first_name, last_name, street_number, street_name, zip5 + - email_address + + .. note:: + A full list of possible values for the json, and its structure can be found + `here <https://docs.ngpvan.com/reference/people#common-models>`_. + + `Args:` + match_json: dict + A dictionary of values to match against. + fields: The fields to return. Leave as default for all available fields + `Returns:` + A person dict object + """ + + logger.info(f'Finding a match for json details.') + + return self._people_search(match_json=match_json)
+ +
[docs] def update_person(self, id=None, id_type='vanid', first_name=None, last_name=None, + date_of_birth=None, email=None, phone=None, phone_type=None, + street_number=None, street_name=None, zip=None): + """ + Update a person record based on a provided ID. All other arguments provided will be + updated on the record. + + .. warning:: + This method can only be run on MyMembers, EveryAction, MyCampaign databases. + + `Args:` + id: str + A valid id + id_type: str + A known person identifier type available on this VAN instance. + Defaults to ``vanid``. + first_name: str + The person's first name + last_name: str + The person's last name + dob: str + ISO 8601 formatted date of birth (e.g. ``1981-02-01``) + email: str + The person's email address + phone: str + Phone number of any type (Work, Cell, Home) + phone_type: str + One of 'H' for home phone, 'W' for work phone, 'C' for cell, 'M' for + main phone or 'F' for fax line. Defaults to home phone. + street_number: str + Street Number + street_name: str + Street Name + zip: str + 5 digit zip code + `Returns:` + A person dict + """ + + return self._people_search( + id=id, + id_type=id_type, + first_name=first_name, + last_name=last_name, + date_of_birth=date_of_birth, + email=email, + phone=phone, + phone_type=phone_type, + street_number=street_number, + street_name=street_name, + zip=zip, + create=True + )
+ +
[docs] def update_person_json(self, id, id_type='vanid', match_json=None): + """ + Update a person record based on a provided ID within the match_json dict. + + .. note:: + A full list of possible values for the json, and its structure can be found + `here <https://docs.ngpvan.com/reference/people#common-models>`_. + + `Args:` + id: str + A valid id + id_type: str + A known person identifier type available on this VAN instance. + Defaults to ``vanid``. + match_json: dict + A dictionary of values to match against and save. + `Returns:` + A person dict + """ + + return self._people_search(id=id, id_type=id_type, match_json=match_json, create=True)
+ +
[docs] def upsert_person(self, first_name=None, last_name=None, date_of_birth=None, email=None, + phone=None, phone_type=None, street_number=None, street_name=None, zip=None): + """ + Create or update a person record. + + .. note:: + Person find must include the following minimum combinations. + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip5, date_of_birth + - first_name, last_name, street_number, street_name, zip5 + - email_address + + .. warning:: + This method can only be run on MyMembers, EveryAction, MyCampaign databases. + + `Args:` + first_name: str + The person's first name + last_name: str + The person's last name + dob: str + ISO 8601 formatted date of birth (e.g. ``1981-02-01``) + email: str + The person's email address + phone: str + Phone number of any type (Work, Cell, Home) + phone_type: str + One of 'H' for home phone, 'W' for work phone, 'C' for cell, 'M' for + main phone or 'F' for fax line. Defaults to home phone. + street_number: str + Street Number + street_name: str + Street Name + zip: str + 5 digit zip code + `Returns:` + A person dict + """ + + return self._people_search( + first_name=first_name, + last_name=last_name, + date_of_birth=date_of_birth, + email=email, + phone=phone, + phone_type=phone_type, + street_number=street_number, + street_name=street_name, + zip=zip, + create=True + )
+ +
[docs] def upsert_person_json(self, match_json): + """ + Create or update a person record. + + .. note:: + Person find must include the following minimum combinations. + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip5, date_of_birth + - first_name, last_name, street_number, street_name, zip5 + - email_address + + .. note:: + A full list of possible values for the json, and its structure can be found + `here <https://docs.ngpvan.com/reference/people#common-models>`_. `vanId` can + be passed to ensure the correct record is updated. + + .. warning:: + This method can only be run on MyMembers, EveryAction, MyCampaign databases. + + `Args:` + match_json: dict + A dictionary of values to match against and save. + `Returns:` + A person dict + """ + + return self._people_search(match_json=match_json, create=True)
+ + def _people_search(self, id=None, id_type=None, first_name=None, last_name=None, + date_of_birth=None, email=None, phone=None, phone_type='H', + street_number=None, street_name=None, zip=None, match_json=None, + create=False): + # Internal method to hit the people find/create endpoints + + addressLine1 = None + if street_name and street_number: + addressLine1 = f'{street_number} {street_name}' + + # Check to see if a match map has been provided + if not match_json: + json = {"firstName": first_name, "lastName": last_name} + + # Will fail if empty dicts are provided, hence needed to add if exist + if email: + json['emails'] = [{'email': email}] + if phone: # To Do: Strip out non-integers from phone + json['phones'] = [{'phoneNumber': phone, 'phoneType': phone_type}] + if date_of_birth: + json['dateOfBirth'] = date_of_birth + if zip or addressLine1: + json['addresses'] = [{}] + if zip: + json['addresses'][0]['zipOrPostalCode'] = zip + if addressLine1: + json['addresses'][0]['addressLine1'] = addressLine1 + else: + json = match_json + if 'vanId' in match_json: + id = match_json['vanId'] + + url = 'people/' + + if id: + + if create: + id_type = '' if id_type in ('vanid', None) else f"{id_type}:" + url += id_type + str(id) + else: + return self.get_person(id, id_type=id_type) + + else: + url += 'find' + + if create: + url += 'OrCreate' + else: + # Ensure that the minimum combination of fields were passed + json_flat = json_format.flatten_json(json) + self._valid_search(**json_flat) + + return self.connection.post_request(url, json=json) + + def _valid_search(self, firstName=None, lastName=None, email=None, phoneNumber=None, + dateOfBirth=None, addressLine1=None, zipOrPostalCode=None, **kwargs): + # Internal method to check if a search is valid, kwargs are ignored + + if (None in [firstName, lastName, email] and + None in [firstName, lastName, phoneNumber] and + None in [firstName, lastName, zipOrPostalCode, dateOfBirth] and + None in [firstName, lastName, addressLine1, zipOrPostalCode] and + None in [email]): + + raise ValueError(""" + Person find must include the following minimum + combinations to conduct a search. + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip, dob + - first_name, last_name, street_number, street_name, zip + - email + """) + + return True + +
[docs] def get_person(self, id, id_type='vanid', expand_fields=[ + 'contribution_history', 'addresses', 'phones', 'emails', + 'codes', 'custom_fields', 'external_ids', 'preferences', + 'recorded_addresses', 'reported_demographics', 'suppressions', + 'cases', 'custom_properties', 'districts', 'election_records', + 'membership_statuses', 'notes', 'organization_roles', + 'disclosure_field_values']): + """ + Returns a single person record using their VANID or external id. + + `Args:` + id: str + A valid id + id_type: str + A known person identifier type available on this VAN instance + such as ``dwid``. Defaults to ``vanid``. + expand_fields: list + A list of fields for which to include data. If a field is omitted, + ``None`` will be returned for that field. Can be ``contribution_history``, + ``addresses``, ``phones``, ``emails``, ``codes``, ``custom_fields``, + ``external_ids``, ``preferences``, ``recorded_addresses``, + ``reported_demographics``, ``suppressions``, ``cases``, ``custom_properties``, + ``districts``, ``election_records``, ``membership_statuses``, ``notes``, + ``organization_roles``, ``scores``, ``disclosure_field_values``. + `Returns:` + A person dict + """ + + # Change end point based on id type + url = 'people/' + + id_type = '' if id_type in ('vanid', None) else f"{id_type}:" + url += id_type + str(id) + + expand_fields = ','.join([json_format.arg_format(f) for f in expand_fields]) + + # Removing the fields that are not returned in MyVoters + NOT_IN_MYVOTERS = ['codes', 'contribution_history', 'organization_roles'] + + if self.connection.db_code == 0: + expand_fields = [v for v in expand_fields if v not in NOT_IN_MYVOTERS] + + logger.info(f'Getting person with {id_type} of {id} at url {url}') + return self.connection.get_request(url, params={'$expand': expand_fields})
+ +
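A sketch of fetching a person by an external id while trimming the expansions (assumed `VAN` setup; the `dwid` value is a placeholder):

```python
from parsons import VAN

van = VAN(db='MyVoters')  # assumed setup

person = van.get_person('23-3985', id_type='dwid',
                        expand_fields=['phones', 'emails'])
```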
[docs] def apply_canvass_result(self, id, result_code_id, id_type='vanid', contact_type_id=None, + input_type_id=None, date_canvassed=None): + """ + Apply a canvass result to a person. Use this end point for attempts that do not + result in a survey response or an activist code (e.g. Not Home). + + `Args:` + id: str + A valid person id + result_code_id : int + Specifies the result code of the attempt. Valid ids can be found + by using the :meth:`get_canvass_responses_result_codes` + id_type: str + A known person identifier type available on this VAN instance + such as ``dwid`` + contact_type_id : int + `Optional`; A valid contact type id + input_type_id : int + `Optional`; Defaults to 11 (API Input) + date_canvassed : str + `Optional`; ISO 8601 formatted date. Defaults to todays date + `Returns:` + ``None`` + """ + + logger.info(f'Applying result code {result_code_id} to {id_type} {id}.') + self.apply_response(id, None, id_type=id_type, contact_type_id=contact_type_id, + input_type_id=input_type_id, date_canvassed=date_canvassed, + result_code_id=result_code_id)
+ +
[docs] def toggle_volunteer_action(self, id, volunteer_activity_id, action, id_type='vanid', + result_code_id=None, contact_type_id=None, input_type_id=None, + date_canvassed=None): + """ + Apply or remove a volunteer action to or from a person. + + `Args:` + id: str + A valid person id + id_type: str + A known person identifier type available on this VAN instance + such as ``dwid`` + volunteer_activity_id: int + A valid volunteer activity id + action: str + Either 'apply' or 'remove' + result_code_id : int + `Optional`; Specifies the result code of the response. If + not included,responses must be specified. Conversely, if + responses are specified, result_code_id must be null. Valid ids + can be found by using the :meth:`get_canvass_responses_result_codes` + contact_type_id: int + `Optional`; A valid contact type id + input_type_id: int + `Optional`; Defaults to 11 (API Input) + date_canvassed: str + `Optional`; ISO 8601 formatted date. Defaults to todays date + + ** NOT IMPLEMENTED ** + """ + + """ + response = {"volunteerActivityId": volunteer_activity_id, + "action": self._action_parse(action), + "type": "VolunteerActivity"} + + logger.info(f'{action} volunteer activity {volunteer_activity_id} to {id_type} {id}') + self.apply_response(id, response, id_type, contact_type_id, input_type_id, date_canvassed, + result_code_id) + """
+ +
+    def apply_response(self, id, response, id_type='vanid', contact_type_id=None,
+                       input_type_id=None, date_canvassed=None, result_code_id=None):
+        """
+        Apply responses such as survey questions, activist codes, and volunteer actions
+        to a person record. This method allows you to apply multiple responses (e.g. two
+        survey questions) at the same time. It is a low level method that requires that
+        you conform to the VAN API `response object format <https://docs.ngpvan.com/reference/people#peoplevanidcanvassresponses>`_.
+
+        `Args:`
+            id: str
+                A valid person id
+            response: list or dict
+                A list of dicts with each dict containing a valid action.
+            id_type: str
+                A known person identifier type available on this VAN instance
+                such as ``dwid``
+            result_code_id: int
+                `Optional`; Specifies the result code of the response. If
+                not included, responses must be specified. Conversely, if
+                responses are specified, result_code_id must be null. Valid ids
+                can be found by using the :meth:`get_canvass_responses_result_codes`
+            contact_type_id: int
+                `Optional`; A valid contact type id
+            input_type_id: int
+                `Optional`; Defaults to 11 (API Input)
+            date_canvassed: str
+                `Optional`; ISO 8601 formatted date. Defaults to today's date
+        `Returns:`
+            ``True`` if successful
+
+        .. code-block:: python
+
+            response = [{"activistCodeId": 18917,
+                         "action": "Apply",
+                         "type": "ActivistCode"},
+                        {"surveyQuestionId": 109149,
+                         "surveyResponseId": 465468,
+                         "action": "SurveyResponse"}
+                        ]
+            van.apply_response(5222, response)
+        """  # noqa: E501,E261
+
+        if result_code_id is not None and response is not None:
+            raise ValueError("Both result_code_id and responses cannot be specified.")
+
+        # Set url based on id_type
+        if id_type == 'vanid':
+            url = f"people/{id}/canvassResponses"
+        else:
+            url = f"people/{id_type}:{id}/canvassResponses"
+
+        json = {"canvassContext": {
+                    "contactTypeId": contact_type_id,
+                    "inputTypeId": input_type_id,
+                    "dateCanvassed": date_canvassed},
+                "resultCodeId": result_code_id}
+
+        # A single response dict is wrapped in a list
+        if isinstance(response, dict):
+            response = [response]
+
+        if response:
+            json['responses'] = response
+
+        return self.connection.post_request(url, json=json)
+ +
[docs] def create_relationship(self, vanid_1, vanid_2, relationship_id): + """ + Create a relationship between two individuals + + `Args:` + vanid_1 : int + The vanid of the primary individual; aka the node + vanid_2 : int + The vanid of the secondary individual; the spoke + relationship_id : int + The relationship id indicating the type of relationship + `Returns:` + ``None`` + """ + + json = {'relationshipId': relationship_id, + 'vanId': vanid_2} + + self.connection.post_request(f"people/{vanid_1}/relationships", json=json) + logger.info('Relationship {vanid_1} to {vanid_2} created.')
+ +
[docs] def apply_person_code(self, id, code_id, id_type='vanid'): + """ + Apply a code to a person. + + `Args:` + id: str + A valid person id. + code_id: int + A valid code id. + id_type: str + A known person identifier type available on this VAN instance + such as ``dwid`` + `Returns:` + ``None`` + """ + + # Set url based on id_type + if id_type == 'vanid': + url = f"people/{id}/codes" + else: + url = f"people/{id_type}:{id}/codes" + + json = {"codeId": code_id} + + self.connection.post_request(url, json=json) + logger.info(f'Code {code_id} applied to person id {id}.')
diff --git a/docs/html/_modules/parsons/ngpvan/saved_lists.html b/docs/html/_modules/parsons/ngpvan/saved_lists.html
new file mode 100644
index 0000000000..ed24d30f20
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/saved_lists.html
@@ -0,0 +1,490 @@
+parsons.ngpvan.saved_lists — Parsons 0.5 documentation
Source code for parsons.ngpvan.saved_lists

+"""NGPVAN Saved List Endpoints"""
+
+from parsons.etl.table import Table
+from parsons.utilities import cloud_storage
+import logging
+import uuid
+from suds.client import Client
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SavedLists(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_saved_lists(self, folder_id=None): + """ + Get saved lists. + + `Args:` + folder_id: int + Filter by the id for a VAN folder. If included returns only + the saved lists in the folder + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('savedLists', params={'folderId': folder_id})) + logger.info(f'Found {tbl.num_rows} saved lists.') + return tbl
+ +
[docs] def get_saved_list(self, saved_list_id): + """ + Returns a saved list object. + + `Args:` + saved_list_id: int + The saved list id. + `Returns:` + dict + """ + + r = self.connection.get_request(f'savedLists/{saved_list_id}') + logger.info(f'Found saved list {saved_list_id}.') + return r
+ +
[docs] def download_saved_list(self, saved_list_id): + """ + Download the vanids associated with a saved list. + + `Args:` + saved_list_id: int + The saved list id. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + ej = ExportJobs(self.connection) + job = ej.export_job_create(saved_list_id) + + if isinstance(job, tuple): + return job + else: + return Table.from_csv(job['downloadUrl'])
+ +
[docs] def upload_saved_list(self, tbl, list_name, folder_id, url_type, id_type='vanid', replace=False, + **url_kwargs): + """ + Upload a saved list. Invalid or unmatched person id records will be ignored. Your api user + must be shared on the target folder. + + `Args:` + tbl: parsons.Table + A parsons table object containing one column of person ids. + list_name: str + The saved list name. + folder_id: int + The folder id where the list will be stored. + url_post_type: str + The cloud file storage to use to post the file. Currently only ``S3``. + id_type: str + The primary key type. The options, beyond ``vanid`` are specific to your + instance of VAN. + replace: boolean + Replace saved list if already exists. + **url_kwargs: kwargs + Arguments to configure your cloud storage url type. + * S3 requires ``bucket`` argument and, if not stored as env variables + ``aws_access_key`` and ``aws_secret_access_key``. + `Returns:` + dict + Upload results information included the number of matched and saved + records in your list. + """ + + # Move to cloud storage + file_name = str(uuid.uuid1()) + url = cloud_storage.post_file(tbl, url_type, file_path=file_name + '.zip', **url_kwargs) + logger.info(f'Table uploaded to {url_type}.') + + # Create XML + xml = self.connection.soap_client.factory.create('CreateAndStoreSavedListMetaData') + xml.SavedList._Name = list_name + xml.DestinationFolder._ID = folder_id + xml.SourceFile.FileName = file_name + '.csv' + xml.SourceFile.FileUrl = url + xml.SourceFile.FileCompression = 'zip' + xml.Options.OverwriteExistingList = replace + + # Describe file + file_desc = self.connection.soap_client.factory.create('SeparatedFileFormatDescription') + file_desc._name = 'csv' + file_desc.HasHeaderRow = True + + # Only support single column for now + col = self.connection.soap_client.factory.create('Column') + col.Name = id_type + col.RefersTo._Path = f"Person[@PersonIDType=\'{id_type}\']" + col._Index = '0' + + # VAN errors for this method are not particularly useful or helpful. For that reason, we + # will check that the folder exists and if the list already exists. + logger.info('Validating folder id and list name.') + if folder_id not in [x['folderId'] for x in self.get_folders()]: + raise ValueError("Folder does not exist or is not shared with API user.") + + if not replace: + if list_name in [x['name'] for x in self.get_saved_lists(folder_id)]: + raise ValueError("Saved list already exists. Set to replace argument to True or " + "change list name.") + + # Assemble request + file_desc.Columns.Column.append(col) + xml.SourceFile.Format = file_desc + + r = Client.dict(self.connection.soap_client.service.CreateAndStoreSavedList(xml)) + if r: + logger.info(f"Uploaded {r['ListSize']} records to {r['_Name']} saved list.") + return r
+ + +
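A sketch of the saved-list upload above (assumed `VAN` setup; the folder id and S3 bucket are placeholders, and AWS credentials are read from the environment as the docstring notes):

```python
from parsons import Table, VAN

van = VAN(db='MyVoters')  # assumed setup

tbl = Table([{'vanid': '19722445'}, {'vanid': '19722446'}])
result = van.upload_saved_list(tbl, 'API Test List', folder_id=1877,
                               url_type='S3', bucket='my-bucket',
                               replace=True)
```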
[docs]class Folders(object): + + def __init__(self, van_connection): + + # Some sort of test if the van_connection is not present. + + self.connection = van_connection + +
[docs] def get_folders(self): + """ + Get all folders owned or shared with the API user. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('folders')) + logger.info(f'Found {tbl.num_rows} folders.') + return tbl
+ +
[docs] def get_folder(self, folder_id): + """ + Get a folder owned by or shared with the API user. + + `Args:` + folder_id: int + The folder id. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + r = self.connection.get_request(f'folders/{folder_id}') + logger.info(f'Found folder {folder_id}.') + return r
+ + +
[docs]class ExportJobs(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_export_job_types(self): + """ + Get export job types + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('exportJobTypes')) + logger.info(f'Found {tbl.num_rows} export job types.') + return tbl
+ +
+    def export_job_create(self, list_id, export_type=4,
+                          webhookUrl="https://www.nothing.com"):
+        """
+        Create an export job.
+
+        Currently, this is only used for exporting saved lists. It is
+        recommended that you use the :meth:`saved_list_download` method
+        instead.
+
+        `Args:`
+            list_id: int
+                The saved list id.
+            export_type: int
+                The export type id, which defines the columns to export.
+            webhookUrl: str
+                A webhook to notify of the status of the export.
+        `Returns:`
+            dict
+                The export job object
+        """
+
+        json = {"savedListId": str(list_id),
+                "type": str(export_type),
+                "webhookUrl": webhookUrl
+                }
+
+        r = self.connection.post_request('exportJobs', json=json)
+        logger.info('Created export job.')
+        return r
+ +
+    def get_export_job(self, export_job_id):
+        """
+        Get an export job.
+
+        `Args:`
+            export_job_id: int
+                The export job id.
+        `Returns:`
+            dict
+                The export job object.
+        """
+
+        r = self.connection.get_request(f'exportJobs/{export_job_id}')
+        logger.info(f'Found export job {export_job_id}.')
+        return r
diff --git a/docs/html/_modules/parsons/ngpvan/scores.html b/docs/html/_modules/parsons/ngpvan/scores.html
new file mode 100644
index 0000000000..8f0738cb2d
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/scores.html
@@ -0,0 +1,625 @@
+parsons.ngpvan.scores — Parsons 0.5 documentation
Source code for parsons.ngpvan.scores

+"""NGPVAN Score Endpoints"""
+
+from parsons.etl.table import Table
+from parsons.utilities import cloud_storage
+import uuid
+import logging
+import petl
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Scores(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_scores(self): + """ + Get all scores. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('scores')) + logger.info(f'Found {tbl.num_rows} scores.') + return tbl
+ +
[docs] def get_score(self, score_id): + """ + Get an individual score. + + `Args:` + score_id: int + The score id + `Returns:` + dict + """ + + r = self.connection.get_request(f'scores/{score_id}') + logger.info(f'Found score {score_id}.') + return r
+ +
[docs] def get_score_updates(self, created_before=None, created_after=None, score_id=None): + """ + Get score updates. + + `Args:` + created_before: str + Filter score updates to those created before date. Use "YYYY-MM-DD" + format. + created_after: str + Filter score updates to those created after date. Use "YYYY-MM-DD" + format. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + params = {'createdBefore': created_before, + 'createdAfter': created_after, + 'scoreId': score_id} + + tbl = Table(self.connection.get_request('scoreUpdates', params=params)) + if tbl.num_rows: + tbl.unpack_dict('updateStatistics', prepend=False) + tbl.unpack_dict('score', prepend=False) + logger.info(f'Found {tbl.num_rows} score updates.') + return tbl
+ +
[docs] def get_score_update(self, score_update_id): + """ + Get a score update object + + `Args:` + score_update_id : int + The score update id + `Returns:` + dict + """ + + r = self.connection.get_request(f'scoreUpdates/{score_update_id}') + logger.info(f'Returning score update {score_update_id}.') + return r
+ +
+    def update_score_status(self, score_update_id, status):
+        """
+        Change the status of a score update object. This end point is used to
+        approve a score loading job.
+
+        `Args:`
+            score_update_id: str
+                The score update id
+            status: str
+                One of 'pending approval', 'approved', 'disapproved' or 'canceled'
+        `Returns:`
+            ``None``
+        """
+
+        if status not in ['pending approval', 'approved', 'disapproved', 'canceled']:
+
+            raise ValueError("""Valid inputs for status are, 'pending approval',
+                             'approved', 'disapproved', 'canceled'""")
+
+        else:
+            if status == 'pending approval':
+                status = 'PendingApproval'
+            else:
+                status = status.capitalize()
+
+        json = {"loadStatus": status}
+
+        r = self.connection.patch_request(f'scoreUpdates/{score_update_id}', json=json)
+        logger.info(f'Score {score_update_id} status updated to {status}.')
+        return r
+ +
+    def upload_scores(self, tbl, config, url_type, id_type='vanid', email=None,
+                      auto_approve=True, approve_tolerance=.1, **url_kwargs):
+        """
+        Upload scores. Use to create or overwrite scores. Multiple score loads
+        should be configured in a single call. [1]_
+
+        `Args:`
+            tbl: object
+                A parsons.Table object. The table must contain the scores and the
+                first column in the table must contain the primary key (e.g. vanid).
+            config: list
+                The score configuration. A list of dictionaries in which you specify
+                the following
+
+                .. list-table::
+                    :widths: 20 80
+                    :header-rows: 0
+
+                    * - ``score_column``
+                      - The name of the column where the score is housed.
+                    * - ``score_id``
+                      - The score slot id.
+
+                Example:
+
+                .. highlight:: python
+                .. code-block:: python
+
+                    [{'score1_id': int, 'score1_column': str},
+                     {'score2_id': int, 'score2_column': str}]
+
+            url_type: str
+                The cloud file storage to use to post the file.
+                See :ref:`Cloud Storage <cloud-storage>` for more details.
+            email: str
+                An email address to send job load status updates.
+            auto_approve: boolean
+                If the scores are within the expected tolerance of deviation from the
+                average values provided, the score will be automatically approved.
+            approve_tolerance: float
+                The deviation from the average scores allowed in order to
+                automatically approve the score. Maximum of .1.
+            **url_kwargs: kwargs
+                Arguments to configure your cloud storage url type. See
+                :ref:`Cloud Storage <cloud-storage>` for more details.
+        `Returns:`
+            int
+                The score load job id.
+
+        .. [1] NGPVAN asks that you load multiple scores in a single call to reduce
+           the load on their servers.
+        """
+
+        # Move to cloud storage
+        file_name = str(uuid.uuid1())
+        url = cloud_storage.post_file(tbl, url_type, file_path=file_name + '.zip', **url_kwargs)
+        logger.info(f'Table uploaded to {url_type}.')
+
+        # Generate shell request
+        json = {"description": 'A description',
+                "file": {
+                    "columnDelimiter": 'csv',
+                    "columns": [{'name': c} for c in tbl.columns],
+                    "fileName": file_name + '.csv',
+                    "hasHeader": "True",
+                    "hasQuotes": "False",
+                    "sourceUrl": url},
+                "actions": []
+                }
+
+        # Configure each score
+        for i in config:
+            action = {"actionType": "score",
+                      "personIdColumn": tbl.columns[0],
+                      "personIdType": id_type,
+                      "scoreColumn": i['score_column'],
+                      "scoreId": i['score_id']}
+
+            if auto_approve:
+                average = petl.stats(tbl.table, i['score_column']).mean
+                action['approvalCriteria'] = {"average": average, "tolerance": approve_tolerance}
+
+            json['actions'].append(action)
+
+        # Add email listener
+        if email:
+            json['listeners'] = [{"type": "EMAIL", 'value': email}]
+
+        # Upload scores
+        r = self.connection.post_request('fileLoadingJobs', json=json)
+        logger.info(f"Scores job {r['jobId']} created.")
+        return r['jobId']
+ + +
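A sketch of a single-score upload with auto-approval (assumed `VAN` setup; the score slot id and bucket are placeholders, and the score table's first column is the primary key as required):

```python
from parsons import Table, VAN

van = VAN(db='MyVoters')  # assumed setup

tbl = Table([{'vanid': '19722445', 'support_score': 0.71},
             {'vanid': '19722446', 'support_score': 0.43}])

job_id = van.upload_scores(
    tbl,
    config=[{'score_id': 9999, 'score_column': 'support_score'}],
    url_type='S3', bucket='my-bucket',
    email='me@example.org')
```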
[docs]class FileLoadingJobs(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
+    def create_file_load(self, file_name, file_url, columns, id_column, id_type,
+                         score_id, score_column, delimiter='csv', header=True, quotes=True,
+                         description=None, email=None, auto_average=None,
+                         auto_tolerance=None):
+        """
+        .. warning::
+            .. deprecated:: 0.7 Use :func:`parsons.VAN.upload_scores` instead.
+
+        Loads a file. Only used for loading scores at this time. Scores must be
+        compressed using `zip`.
+
+        `Args:`
+            file_name: str
+                The name of the file contained in the zip file.
+            file_url: str
+                The url path to directly download the file. Can also be a path to an
+                FTP site.
+            columns: list
+                A list of column names contained in the file.
+            id_column: str
+                The column name of the id column in the file.
+            id_type: str
+                A valid primary key, such as `VANID` or `DWID`. Varies by VAN instance.
+            score_id: int
+                The score slot id.
+            score_column: str
+                The column holding the score.
+            delimiter: str
+                The file delimiter used.
+            email: str
+                A valid email address to which file loading status will be sent.
+            auto_average: float
+                The average of scores to be loaded.
+            auto_tolerance: float
+                The fault tolerance of the VAN calculated average compared to the
+                ``auto_average``. The tolerance must be less than 10% of the difference
+                between the maximum and minimum possible acceptable values of the score.
+        `Returns:`
+            dict
+                The file load id
+        """
+
+        columns = [{'name': c} for c in columns]
+
+        # To Do: Validate that it is a .zip file. Not entirely sure if this is possible
+        # as some urls might not end in ".zip".
+
+        if delimiter not in ['csv', 'tab', 'pipe']:
+            raise ValueError("Delimiter must be one of 'csv', 'tab' or 'pipe'")
+
+        delimiter = delimiter.capitalize()
+
+        json = {"description": 'A description',
+                "file": {
+                    "columnDelimiter": delimiter,
+                    "columns": columns,
+                    "fileName": file_name,
+                    "hasHeader": header,
+                    "hasQuotes": quotes,
+                    "sourceUrl": file_url
+                },
+                "actions": [
+                    {"actionType": "score",
+                     "personIdColumn": id_column,
+                     "personIdType": id_type,
+                     "scoreColumn": score_column,
+                     "scoreId": score_id}],
+                "listeners": [
+                    {"type": "EMAIL",
+                     "value": email}]
+                }
+
+        if auto_average and auto_tolerance:
+
+            # "actions" is a list, so the approval criteria attaches to the score action
+            json["actions"][0]["approvalCriteria"] = {"average": auto_average,
+                                                      "tolerance": auto_tolerance}
+
+        r = self.connection.post_request('fileLoadingJobs', json=json)['jobId']
+        logger.info(f'Score loading job {r} created.')
+        return r
+ +
[docs] def create_file_load_multi(self, file_name, file_url, columns, id_column, id_type, + score_map, delimiter='csv', header=True, quotes=True, + description=None, email=None): + """ + .. warning:: + .. deprecated:: 0.7 Use :func:`parsons.VAN.upload_scores` instead. + + An iteration of the :meth:`file_load` method that allows you to load multiple scores + at the same time. + + `Args:` + file_name : str + The name of the file contained in the zip file. + file_url : str + The url path to directly download the file. Can also be a path to an FTP site. + columns: list + A list of column names contained in the file. + id_column : str + The column name of the id column in the file. + id_type : str + A valid primary key, such as `VANID` or `DWID`. Varies by VAN instance. + score_map : list + A list of dicts that adheres to the following syntax + + .. highlight:: python + .. code-block:: python + + [{'score_id' : int, + 'score_column': str, + 'auto_average': float, + 'auto_tolerance': float }] + + email: str + A valid email address to which file loading status will be sent. + `Returns:` + int + The file load job id + """ + + columns = [{'name': c} for c in columns] + + # To Do: Validate that it is a .zip file. Not entirely sure if this is possible + # as some urls might not end in ".zip". + + if delimiter not in ['csv', 'tab', 'pipe']: + raise ValueError("Delimiter must be one of 'csv', 'tab' or 'pipe'") + + delimiter = delimiter.capitalize() + + json = {"description": 'A description', + "file": { + "columnDelimiter": delimiter, + "columns": columns, + "fileName": file_name, + "hasHeader": header, + "hasQuotes": quotes, + "sourceUrl": file_url + }, + "listeners": [ + {"type": "EMAIL", + "value": email}] + } + + actions = [] + + for score in score_map: + + action = {"actionType": "score", + "personIdColumn": id_column, + "personIdType": id_type, + "scoreColumn": score['score_column'], + "scoreId": score['score_id'], + "approvalCriteria": { + "average": score['auto_average'], + "tolerance": score['auto_tolerance'] + } + } + + actions.append(action) + + json['actions'] = actions + + r = self.connection.post_request('fileLoadingJobs', json=json)['jobId'] + logger.info(f'Score loading job {r} created.') + return r
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/ngpvan/signups.html b/docs/html/_modules/parsons/ngpvan/signups.html
new file mode 100644
index 0000000000..8d9046e061
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/signups.html
@@ -0,0 +1,431 @@
+ parsons.ngpvan.signups — Parsons 0.5 documentation

Source code for parsons.ngpvan.signups

+"""NGPVAN Signups Endpoints"""
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Signups(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_signups_statuses(self, event_id=None, event_type_id=None): + """ + Get a list of valid signup statuses for a given event type + or event. You must pass one of ``event_id`` or ``event_type_id`` + but not both. + + `Args:` + event_id: int + A valid event id. + event_type_id: int + A valid event type id. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + if event_id is None and event_type_id is None: + raise ValueError('One of event_id or event_type_id must be populated') + + if event_id is not None and event_type_id is not None: + raise ValueError('event_id and event_type_id may not both be populated') + + if event_id: + params = {'eventId': event_id} + if event_type_id: + params = {'eventTypeId': event_type_id} + + tbl = Table(self.connection.get_request('signups/statuses', params=params)) + logger.info(f'Found {tbl.num_rows} signup statuses.') + return tbl
+ +
[docs] def get_person_signups(self, vanid): + """ + Get the signup history of a person. + + `Args:` + vanid: int + A valid vanid associated with a person. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('signups', params={'vanID': vanid})) + logger.info(f'Found {tbl.num_rows} signups for {vanid}.') + return self._unpack_signups(tbl)
+ +
[docs] def get_event_signups(self, event_id): + """ + Get the signup history of an event. + + `Args:` + event_id: int + A valid event_id associated with an event + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('signups', params={'eventId': event_id})) + logger.info(f'Found {tbl.num_rows} signups for event {event_id}.') + return self._unpack_signups(tbl)
+ +
[docs] def get_signup(self, event_signup_id): + """ + Get a single signup object. + + `Args:` + event_signup_id: int + A valid event_signup_id associated with a signup. + `Returns:` + dict + The signup object. + """ + + r = self.connection.get_request(f'signups/{event_signup_id}') + logger.info(f'Found signup {event_signup_id}.') + return r
+ +
[docs] def create_signup(self, vanid, event_id, shift_id, role_id, status_id, location_id): + """ + Create a new signup for an event. + + `Args:` + vanid: int + A valid vanid of the person to sign up for the event. + event_id: int + A valid event_id to associate the person with the event + shift_id: int + A shift_id, associated with the event to assign the person + role_id: int + A role_id, associated with the event to assign the person + status_id: int + A status_id of the person + location_id: int + A location_id for the event + `Returns:` + int + The event signup id + """ + + signup = {'person': {'vanId': vanid}, + 'event': {'eventId': event_id}, + 'shift': {'eventShiftId': shift_id}, + 'role': {'roleId': role_id}, + 'status': {'statusId': status_id}, + 'location': {'locationId': location_id} + } + + r = self.connection.post_request('signups', json=signup) + logger.info(f'Signup {r} created.') + return r
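A hypothetical call, assuming ``van`` is an instantiated Parsons VAN object; every id below is invented for illustration:

.. code-block:: python

    signup_id = van.create_signup(vanid=100349920, event_id=750000849,
                                  shift_id=19076, role_id=263920,
                                  status_id=11, location_id=3)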
+ +
[docs] def update_signup(self, event_signup_id, shift_id=None, role_id=None, status_id=None, + location_id=None): + """ + Update a signup object. All of the kwargs will update the values associated + with them. + + `Args:` + event_signup_id: int + A valid event signup id + shift_id: int + The shift_id to update + role_id: int + The role_id to update + status_id: int + The status_id to update + location_id: int + The location_id to update + `Returns:` + ``None`` + """ + + # Get the signup object + signup = self.connection.get_request(f'signups/{event_signup_id}') + + # Update the signup object + if shift_id: + signup['shift'] = {'eventShiftId': shift_id} + if role_id: + signup['role'] = {'roleId': role_id} + if status_id: + signup['status'] = {'statusId': status_id} + if location_id: + signup['location'] = {'locationId': location_id} + + return self.connection.put_request(f'signups/{event_signup_id}', json=signup)
+ +
[docs] def delete_signup(self, event_signup_id): + """ + Delete a signup object + + `Args:` + event_signup_id: int + A valid event signup id + `Returns:` + ``None`` + """ + + r = self.connection.delete_request(f'signups/{event_signup_id}') + logger.info(f'Signup {event_signup_id} deleted.') + return r
+ + def _unpack_signups(self, table): + + # Unpack all of the nested jsons + table.unpack_dict('person', prepend=False) + table.unpack_dict('status') + table.unpack_dict('event') + table.unpack_dict('shift') + table.unpack_dict('role') + table.unpack_dict('location') + + return table
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/ngpvan/supporter_groups.html b/docs/html/_modules/parsons/ngpvan/supporter_groups.html
new file mode 100644
index 0000000000..1d74a9653d
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/supporter_groups.html
@@ -0,0 +1,350 @@
+ parsons.ngpvan.supporter_groups — Parsons 0.5 documentation

Source code for parsons.ngpvan.supporter_groups

+"""NGPVAN Supporter Groups Endpoints"""
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SupporterGroups(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_supporter_groups(self): + """ + Get supporter groups. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = Table(self.connection.get_request('supporterGroups')) + logger.info(f'Found {tbl.num_rows} supporter groups.') + return tbl
+ +
[docs] def get_supporter_group(self, supporter_group_id): + """ + Get a supporter group. + + `Args:` + supporter_group_id: int + The supporter group id. + `Returns:` + dict + """ + + r = self.connection.get_request(f'supporterGroups/{supporter_group_id}') + logger.info(f'Found supporter group {supporter_group_id}.') + return r
+ +
[docs] def create_supporter_group(self, name, description): + """ + Create a new supporter group. + + `Args:` + name: str + The name of the supporter group. 100 character limit. + description: str + Optional; A description of the supporter group. 200 character limit. + `Returns:` + Parsons Table with the newly created supporter group id, name + and description + """ + + json = {'name': name, 'description': description} + r = self.connection.post_request('supporterGroups', json=json) + return r
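For instance, assuming ``van`` is an instantiated VAN object (the group name and description are invented):

.. code-block:: python

    group = van.create_supporter_group('Volunteer Captains',
                                       'Volunteers who lead a canvass team')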
+ +
[docs] def delete_supporter_group(self, supporter_group_id): + """ + Delete a supporter group. + + `Args:` + supporter_group_id: int + The supporter group id + `Returns:` + ``None`` + """ + + r = self.connection.delete_request(f'supporterGroups/{supporter_group_id}') + logger.info(f'Deleted supporter group {supporter_group_id}.') + return r
+ +
[docs] def add_person_supporter_group(self, supporter_group_id, vanid): + """ + Add a person to a supporter group. + + `Args:` + supporter_group_id: int + The supporter group id + vanid: int + The vanid of the person to add + `Returns:` + ``None`` + """ + + r = self.connection.put_request(f'supporterGroups/{supporter_group_id}/people/{vanid}') + logger.info(f'Added person {vanid} to {supporter_group_id} supporter group.') + return r
+ +
[docs] def delete_person_supporter_group(self, supporter_group_id, vanid): + """ + Remove a person from a supporter group + + `Args:` + supporter_group_id: int + The supporter group id + vanid: int + The vanid of the person to remove + `Returns:` + ``None`` + """ + + r = self.connection.delete_request(f'supporterGroups/{supporter_group_id}/people/{vanid}') + logger.info(f'Deleted person {vanid} from {supporter_group_id} supporter group.') + return r
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/ngpvan/survey_questions.html b/docs/html/_modules/parsons/ngpvan/survey_questions.html
new file mode 100644
index 0000000000..d469c93dca
--- /dev/null
+++ b/docs/html/_modules/parsons/ngpvan/survey_questions.html
@@ -0,0 +1,342 @@
+ parsons.ngpvan.survey_questions — Parsons 0.5 documentation

Source code for parsons.ngpvan.survey_questions

+"""NGPVAN Survey Questions Endpoints"""
+from parsons.etl.table import Table
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SurveyQuestions(object): + + def __init__(self, van_connection): + + self.connection = van_connection + +
[docs] def get_survey_questions(self, statuses=['Active'], name=None, sq_type=None, question=None, + cycle=None): + """ + Get survey questions. + + `Args:` + statuses: list + Filter to a list of statuses of survey questions. One or more of ``Active``, + ``Archived``, and ``Inactive``. + name: str + Filter to survey questions whose names begin with the input. + sq_type: str + Filter to survey questions of a given type. + question: str + Filter to survey questions with script questions that contain the given input. + cycle: str + Filter to survey questions with the given cycle. A year in the format "YYYY". + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + params = {'statuses': statuses, + '$top': self.page_size, + 'name': name, + 'type': sq_type, + 'question': question, + 'cycle': cycle} + + tbl = Table(self.connection.get_request('surveyQuestions', params=params)) + logger.info(f'Found {tbl.num_rows} survey questions.') + return tbl
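A sketch of a filtered call, assuming ``van`` is an instantiated VAN object:

.. code-block:: python

    sqs = van.get_survey_questions(statuses=['Active', 'Inactive'], cycle='2020')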
+ +
[docs] def get_survey_question(self, survey_question_id): + """ + Get a survey question. + + `Args:` + survey_question_id: int + The survey question id. + `Returns:` + dict + The survey question object. + """ + + r = self.connection.get_request(f'surveyQuestions/{survey_question_id}') + logger.info(f'Found survey question {survey_question_id}.') + return r
+ +
[docs] def apply_survey_response(self, id, survey_question_id, survey_response_id, + id_type='vanid', result_code_id=None, contact_type_id=None, + input_type_id=None, date_canvassed=None): + """ + Apply a single survey response to a person. + + `Args:` + id: str + A valid person id + survey_question_id: int + A valid survey question id + survey_response_id: int + A valid survey response id + id_type: str + A known person identifier type available on this VAN instance + such as ``dwid`` + result_code_id : int + `Optional`; Specifies the result code of the response. If + not included, responses must be specified. Conversely, if + responses are specified, result_code_id must be null. Valid ids + can be found by using the :meth:`get_canvass_responses_result_codes` + contact_type_id : int + `Optional`; A valid contact type id + input_type_id : int + `Optional`; Defaults to 11 (API Input) + date_canvassed : str + `Optional`; ISO 8601 formatted date. Defaults to today's date + """ + + response = {"surveyQuestionId": survey_question_id, + "surveyResponseId": survey_response_id, + "type": "surveyResponse"} + + logger.info(f'Applying survey question {survey_question_id} to {id_type} {id}') + self.apply_response(id, response, id_type, result_code_id=result_code_id, + contact_type_id=contact_type_id, input_type_id=input_type_id, + date_canvassed=date_canvassed)
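A sketch of applying a response, with all ids assumed for illustration:

.. code-block:: python

    van.apply_survey_response(1000000, survey_question_id=351006,
                              survey_response_id=1499290)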
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/notifications/gmail.html b/docs/html/_modules/parsons/notifications/gmail.html
new file mode 100644
index 0000000000..baf3cc9e81
--- /dev/null
+++ b/docs/html/_modules/parsons/notifications/gmail.html
@@ -0,0 +1,328 @@
+ parsons.notifications.gmail — Parsons 0.5 documentation

Source code for parsons.notifications.gmail

+import base64
+from apiclient import errors
+from googleapiclient.discovery import build
+from httplib2 import Http
+from oauth2client import file, client, tools
+from parsons.notifications.sendmail import SendMail
+
+SCOPES = 'https://www.googleapis.com/auth/gmail.send'
+
+
+
[docs]class Gmail(SendMail): + """Create a Gmail object, for sending emails. + + `Args:` + creds_path: str + The path to the credentials.json file. + token_path: str + The path to the token.json file. + user_id: str + Optional; Sender email address. Defaults to the special value + "me" which is used to indicate the authenticated user. + """ + + def __init__(self, creds_path=None, token_path=None, user_id='me'): + + self.user_id = user_id + + if not creds_path: + raise ValueError("Invalid path to credentials.json.") + + if not token_path: + raise ValueError("Invalid path to token.json.") + + self.store = file.Storage(token_path) + self.creds = self.store.get() + + # BUG-1 + # http = httplib2shim.Http() + + if not self.creds or self.creds.invalid: + flow = client.flow_from_clientsecrets(creds_path, SCOPES) + self.creds = tools.run_flow(flow, self.store) + + # BUG-1 + # self.creds = self.run_flow(flow, self.store, http=http) + + self.service = build('gmail', 'v1', http=self.creds.authorize(Http())) + + # BUG-1 + # self.service = build('gmail', 'v1', http=self.creds.authorize(http)) + + def _encode_raw_message(self, message): + return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()} + + def _send_message(self, msg): + """Send an email message. + + `Args:` + msg: object + The email message to send; it is base64url encoded before sending, + i.e. the objects created by the create_* instance methods + `Returns:` + dict + A Users.messages object; see `https://developers.google.com/gmail/api/v1/reference/users/messages#resource.` # noqa + for more info. + """ + self.log.info("Sending a message...") + + message = self._encode_raw_message(msg) + + self.log.debug(message) + + try: + message = (self.service.users().messages() + .send(userId=self.user_id, body=message).execute()) + except errors.HttpError: + self.log.exception( + 'An error occurred while attempting to send a message.') + raise + else: + self.log.debug(message) + self.log.info( + f"Message sent successfully (Message Id: {message['id']})") + + return message
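A possible usage sketch. It assumes the ``SendMail`` base class exposes a ``send_email`` helper (as it does elsewhere in Parsons) and that ``Gmail`` is a top-level export; the paths and addresses are placeholders:

.. code-block:: python

    from parsons import Gmail  # top-level export assumed

    gmail = Gmail(creds_path='credentials.json', token_path='token.json')
    gmail.send_email('sender@example.org', 'recipient@example.org',
                     'Job finished', 'The nightly sync completed.')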
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/notifications/slack.html b/docs/html/_modules/parsons/notifications/slack.html
new file mode 100644
index 0000000000..48302bc037
--- /dev/null
+++ b/docs/html/_modules/parsons/notifications/slack.html
@@ -0,0 +1,476 @@
+ parsons.notifications.slack — Parsons 0.5 documentation

Source code for parsons.notifications.slack

+import os
+import time
+
+from parsons.etl.table import Table
+from parsons.utilities.check_env import check
+
+from slackclient import SlackClient
+from slackclient.exceptions import SlackClientError
+
+import requests
+
+
+
[docs]class Slack(object): + + def __init__(self, api_key=None): + + if api_key is None: + + try: + self.api_key = os.environ["SLACK_API_TOKEN"] + + except KeyError: + raise KeyError('Missing api_key. It must be passed as an ' + 'argument or stored as an environment variable') + + else: + + self.api_key = api_key + + self.client = SlackClient(self.api_key)
[docs] def channels(self, fields=['id', 'name'], exclude_archived=False, + types=['public_channel']): + """ + Return a list of all channels in a Slack team. + + `Args:` + fields: list + A list of the fields to return. By default, only the channel + `id` and `name` are returned. See + https://api.slack.com/methods/conversations.list for a full + list of available fields. `Notes:` nested fields are unpacked. + exclude_archived: bool + Set to `True` to exclude archived channels from the list. + Default is false. + types: list + Mix and match channel types by providing a list of any + combination of `public_channel`, `private_channel`, + `mpim` (aka group messages), or `im` (aka 1-1 messages). + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + tbl = self._paginate_request( + "conversations.list", "channels", types=types, + exclude_archived=exclude_archived) + + tbl.unpack_dict("topic", include_original=False, prepend=True, + prepend_value="topic") + tbl.unpack_dict("purpose", include_original=False, + prepend=True, prepend_value="purpose") + + rm_cols = [x for x in tbl.columns if x not in fields] + tbl.remove_column(*rm_cols) + + return tbl
+ +
[docs] def users(self, fields=['id', 'name', 'deleted', 'profile_real_name_normalized', + 'profile_email']): + """ + Return a list of all users in a Slack team. + + `Args:` + fields: list + A list of the fields to return. By default, only the user + `id` and `name` and `deleted` status are returned. See + https://api.slack.com/methods/users.list for a full list of + available fields. `Notes:` nested fields are unpacked. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + tbl = self._paginate_request("users.list", "members", include_locale=True) + + tbl.unpack_dict("profile", include_original=False, prepend=True, + prepend_value="profile") + + rm_cols = [x for x in tbl.columns if x not in fields] + tbl.remove_column(*rm_cols) + + return tbl
+ +
[docs] @classmethod + def message(cls, channel, text, webhook=None, parent_message_id=None): + """ + Send a message to a Slack channel with a webhook instead of an api_key. + You might not have the full-access API key but still want to notify a channel. + `Args:` + channel: str + The name or id of a `public_channel`, a `private_channel`, or + an `im` (aka 1-1 message). + text: str + Text of the message to send. + webhook: str + A webhook url, used instead of an api_key. + Looks like: https://hooks.slack.com/services/Txxxxxxx/Bxxxxxx/Dxxxxxxx + parent_message_id: str + The `ts` value of the parent message. If used, this will thread the message. + """ + webhook = check('SLACK_API_WEBHOOK', webhook, optional=True) + payload = {'channel': channel, 'text': text} + if parent_message_id: + payload['thread_ts'] = parent_message_id + return requests.post(webhook, json=payload)
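A minimal sketch; the webhook URL is a placeholder for your own incoming-webhook URL:

.. code-block:: python

    from parsons import Slack

    Slack.message('#general', 'Sync finished.',
                  webhook='https://hooks.slack.com/services/T000/B000/XXXX')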
+ +
[docs] def message_channel(self, channel, text, as_user=False, parent_message_id=None): + """ + Send a message to a Slack channel + + `Args:` + channel: str + The name or id of a `public_channel`, a `private_channel`, or + an `im` (aka 1-1 message). + text: str + Text of the message to send. + as_user: bool + Pass true to post the message as the authenticated user, + instead of as a bot. Defaults to false. See + https://api.slack.com/methods/chat.postMessage#authorship for + more information about Slack authorship. + parent_message_id: str + The `ts` value of the parent message. If used, this will thread the message. + `Returns:` + `dict`: + A response json + """ + resp = self.client.api_call( + "chat.postMessage", channel=channel, text=text, + as_user=as_user, thread_ts=parent_message_id) + + if not resp['ok']: + + # Retry once after the cool-off period if rate limited + if resp['error'] == 'ratelimited': + time.sleep(int(resp['headers']['Retry-After'])) + + resp = self.client.api_call( + "chat.postMessage", channel=channel, text=text, + as_user=as_user, thread_ts=parent_message_id) + + # Only raise if the call (or its retry) still failed + if not resp['ok']: + raise SlackClientError(resp['error']) + + return resp
+ +
[docs] def upload_file(self, channels, filename, filetype=None, + initial_comment=None, title=None, is_binary=False): + """ + Upload a file to Slack channel(s). + + `Args:` + channels: list + The list of channel names or IDs where the file will be shared. + filename: str + The path to the file to be uploaded. + filetype: str + A file type identifier. If None, type will be inferred based on + file extension. This is used to determine what fields are + available for that object. See https://api.slack.com/types/file + for a list of valid types and for more information about the + file object. + initial_comment: str + The text of the message to send along with the file. + title: str + Title of the file to be uploaded. + is_binary: bool + If True, open this file in binary mode. This is needed if + uploading binary files. Defaults to False. + `Returns:` + `dict`: + A response json + """ + if filetype is None and '.' in filename: + filetype = filename.split('.')[-1] + + mode = 'rb' if is_binary else 'r' + with open(filename, mode) as file_content: + resp = self.client.api_call( + "files.upload", channels=channels, file=file_content, + filetype=filetype, initial_comment=initial_comment, + title=title) + + if not resp['ok']: + + # Retry once after the cool-off period if rate limited + if resp['error'] == 'ratelimited': + time.sleep(int(resp['headers']['Retry-After'])) + + file_content.seek(0) + resp = self.client.api_call( + "files.upload", channels=channels, file=file_content, + filetype=filetype, initial_comment=initial_comment, + title=title) + + # Only raise if the call (or its retry) still failed + if not resp['ok']: + raise SlackClientError(resp['error']) + + return resp
+ + def _paginate_request(self, endpoint, collection, **kwargs): + # The max number of objects to request at a time. + # This is an internal limit so as not to overload the slack api + LIMIT = 200 + + items = [] + next_page = True + cursor = None + while next_page: + resp = self.client.api_call( + endpoint, cursor=cursor, limit=LIMIT, **kwargs) + + if not resp['ok']: + + if resp['error'] == 'ratelimited': + time.sleep(int(resp['headers']['Retry-After'])) + continue + + raise SlackClientError(resp['error']) + + items.extend(resp[collection]) + + if resp["response_metadata"]["next_cursor"]: + cursor = resp["response_metadata"]["next_cursor"] + else: + next_page = False + + return Table(items)
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/phone2action/p2a.html b/docs/html/_modules/parsons/phone2action/p2a.html
new file mode 100644
index 0000000000..ccb55899a7
--- /dev/null
+++ b/docs/html/_modules/parsons/phone2action/p2a.html
@@ -0,0 +1,622 @@
+ parsons.phone2action.p2a — Parsons 0.5 documentation

Source code for parsons.phone2action.p2a

+from requests.auth import HTTPBasicAuth
+from parsons.etl import Table
+from parsons.utilities import check_env
+from parsons.utilities.api_connector import APIConnector
+from parsons.utilities.datetime import date_to_timestamp
+import logging
+
+logger = logging.getLogger(__name__)
+
+PHONE2ACTION_URI = 'https://api.phone2action.com/2.0/'
+
+
+
[docs]class Phone2Action(object): + """ + Instantiate Phone2Action Class + + `Args:` + app_id: str + The Phone2Action provided application id. Not required if ``PHONE2ACTION_APP_ID`` + env variable set. + app_key: str + The Phone2Action provided application key. Not required if ``PHONE2ACTION_APP_KEY`` + env variable set. + `Returns:` + Phone2Action Class + """ + + def __init__(self, app_id=None, app_key=None): + + self.app_id = check_env.check('PHONE2ACTION_APP_ID', app_id) + self.app_key = check_env.check('PHONE2ACTION_APP_KEY', app_key) + self.auth = HTTPBasicAuth(self.app_id, self.app_key) + self.client = APIConnector(PHONE2ACTION_URI, auth=self.auth) + + def _paginate_request(self, url, args=None, page=None): + # Internal pagination method + + if page is not None: + args['page'] = page + + r = self.client.get_request(url, params=args) + + json = r['data'] + + if page is not None: + return json + + # If count of items is less than the total allowed per page, paginate + while r['pagination']['count'] == r['pagination']['per_page']: + + r = self.client.get_request(r['pagination']['next_url'], args) + json.extend(r['data']) + + return json + +
[docs] def get_advocates(self, state=None, campaign_id=None, updated_since=None, page=None): + """ + Return advocates (person records). + + If no page is specified, the method will automatically paginate through the available + advocates. + + `Args:` + state: str + Filter by US postal abbreviation for a state + or territory e.g., "CA" "NY" or "DC" + campaign_id: int + Filter to specific campaign + updated_since: str or int or datetime + Fetch all advocates updated since the date provided; this can be a datetime + object, a UNIX timestamp, or a date string (ex. '2014-01-05 23:59:43') + page: int + Page number of data to fetch; if this is specified, call will only return one + page. + `Returns:` + A dict of parsons tables: + * emails + * phones + * memberships + * tags + * ids + * fields + * advocates + """ + + # Convert the passed in updated_since into a Unix timestamp (which is what the API wants) + updated_since = date_to_timestamp(updated_since) + + args = {'state': state, + 'campaignid': campaign_id, + 'updatedSince': updated_since} + + logger.info('Retrieving advocates...') + json = self._paginate_request('advocates', args=args, page=page) + + return self._advocates_tables(Table(json))
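A brief usage sketch (reading credentials from environment variables is an assumption):

.. code-block:: python

    from parsons import Phone2Action

    p2a = Phone2Action()  # assumes PHONE2ACTION_APP_ID / PHONE2ACTION_APP_KEY are set
    advocates = p2a.get_advocates(state='CA')
    advocates['emails'].to_csv('advocate_emails.csv')  # each key holds a Parsons Table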
+ + def _advocates_tables(self, tbl): + # Convert the advocates nested table into multiple tables + + tbls = { + 'advocates': tbl, + 'emails': Table(), + 'phones': Table(), + 'memberships': Table(), + 'tags': Table(), + 'ids': Table(), + 'fields': Table(), + } + + if not tbl: + return tbls + + logger.info(f'Retrieved {tbl.num_rows} advocates...') + + # Unpack all of the single objects + # The Phone2Action API docs say that created_at and updated_at are dictionaries, but + # the data returned from the server is an ISO8601 timestamp. - EHS, 05/21/2020 + for c in ['address', 'districts']: + tbl.unpack_dict(c) + + # Unpack all of the arrays + child_tables = [child for child in tbls.keys() if child != 'advocates'] + for c in child_tables: + tbls[c] = tbl.long_table(['id'], c, key_rename={'id': 'advocate_id'}) + + return tbls
[docs] def get_campaigns(self, state=None, zip=None, include_generic=False, include_private=False, + include_content=True): + """ + Returns a list of campaigns + + `Args:` + state: str + Filter by US postal abbreviation for a state or territory e.g., "CA" "NY" or "DC" + zip: int + Filter by 5 digit zip code + include_generic: boolean + When filtering by state or ZIP code, include unrestricted campaigns + include_private: boolean + If true, will include private campaigns in results + include_content: boolean + If true, include campaign content fields, which may vary. This may cause + sync errors. + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + args = {'state': state, + 'zip': zip, + 'includeGeneric': str(include_generic), + 'includePrivate': str(include_private) + } + + tbl = Table(self.client.get_request('campaigns', params=args)) + tbl.unpack_dict('updated_at') + if include_content: + tbl.unpack_dict('content') + + return tbl
+ +
[docs] def create_advocate(self, + campaigns, + first_name=None, + last_name=None, + email=None, + phone=None, + address1=None, + address2=None, + city=None, + state=None, + zip5=None, + sms_optin=None, + email_optin=None, + sms_optout=None, + email_optout=None, + **kwargs): + """ + Create an advocate. + + If you want to opt an advocate into or out of SMS / email campaigns, you must provide + the email address or phone number (accordingly). + + The list of arguments only partially covers the fields that can be set on the advocate. + For a complete list of fields that can be updated, see + `the Phone2Action API documentation <https://docs.phone2action.com/#calls-create>`_. + + `Args:` + campaigns: list + The ID(s) of campaigns to add the advocate to + first_name: str + `Optional`; The first name of the advocate + last_name: str + `Optional`; The last name of the advocate + email: str + `Optional`; An email address to add for the advocate. One of ``email`` or ``phone`` + is required. + phone: str + `Optional`; A phone number to add for the advocate. One of ``email`` or ``phone`` is + required. + address1: str + `Optional`; The first line of the advocate's address + address2: str + `Optional`; The second line of the advocate's address + city: str + `Optional`; The city of the advocate's address + state: str + `Optional`; The state of the advocate's address + zip5: str + `Optional`; The 5 digit Zip code of the advocate + sms_optin: boolean + `Optional`; Whether to opt the advocate into receiving text messages; an SMS + confirmation text message will be sent. You must provide values for the ``phone`` + and ``campaigns`` arguments. + email_optin: boolean + `Optional`; Whether to opt the advocate into receiving emails. You must provide + values for the ``email`` and ``campaigns`` arguments. + sms_optout: boolean + `Optional`; Whether to opt the advocate out of receiving text messages. You must + provide values for the ``phone`` and ``campaigns`` arguments. Once an advocate is + opted out, they cannot be opted back in. + email_optout: boolean + `Optional`; Whether to opt the advocate out of receiving emails. You must + provide values for the ``email`` and ``campaigns`` arguments. Once an advocate is + opted out, they cannot be opted back in. + **kwargs: + Additional fields on the advocate to update + `Returns:` + The int ID of the created advocate.
+ """ + + # Validate the passed in arguments + + if not campaigns: + raise ValueError( + 'When creating an advocate, you must specify one or more campaigns.') + + if not email and not phone: + raise ValueError( + 'When creating an advocate, you must provide an email address or a phone number.') + + if (sms_optin or sms_optout) and not phone: + raise ValueError( + 'When opting an advocate in or out of SMS messages, you must specify a valid ' + 'phone and one or more campaigns') + + if (email_optin or email_optout) and not email: + raise ValueError( + 'When opting an advocate in or out of email messages, you must specify a valid ' + 'email address and one or more campaigns') + + # Align our arguments with the expected parameters for the API + payload = { + 'email': email, + 'phone': phone, + 'firstname': first_name, + 'lastname': last_name, + 'address1': address1, + 'address2': address2, + 'city': city, + 'state': state, + 'zip5': zip5, + 'smsOptin': 1 if sms_optin else None, + 'emailOptin': 1 if email_optin else None, + 'smsOptout': 1 if sms_optout else None, + 'emailOptout': 1 if email_optout else None, + } + + # Clean up any keys that have a "None" value + payload = { + key: val + for key, val in payload.items() + if val is not None + } + + # Merge in any kwargs + payload.update(kwargs) + + # Turn into a list of items so we can append multiple campaigns + campaign_keys = [('campaigns[]', val) for val in campaigns] + data = [(key, value) for key, value in payload.items()] + campaign_keys + + # Call into the Phone2Action API + response = self.client.post_request('advocates', data=data) + return response['advocateid']
+ +
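A sketch of creating an advocate (the campaign id and contact details are invented):

.. code-block:: python

    advocate_id = p2a.create_advocate(campaigns=[4817],
                                      first_name='Ann', last_name='Smith',
                                      email='ann@example.org', email_optin=True)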
[docs] def update_advocate(self, + advocate_id, + campaigns=None, + email=None, + phone=None, + sms_optin=None, + email_optin=None, + sms_optout=None, + email_optout=None, + **kwargs): + """ + Update the fields of an advocate. + + If you want to opt an advocate into or out of SMS / email campaigns, you must provide + the email address or phone number along with a list of campaigns. + + The list of arguments only partially covers the fields that can be updated on the advocate. + For a complete list of fields that can be updated, see + `the Phone2Action API documentation <https://docs.phone2action.com/#calls-create>`_. + + `Args:` + advocate_id: integer + The ID of the advocate being updated + campaigns: list + `Optional`; The ID(s) of campaigns to add the user to + email: str + `Optional`; An email address to add for the advocate (or to use when opting in/out) + phone: str + `Optional`; A phone number to add for the advocate (or to use when opting in/out) + sms_optin: boolean + `Optional`; Whether to opt the advocate into receiving text messages; an SMS + confirmation text message will be sent. You must provide values for the ``phone`` + and ``campaigns`` arguments. + email_optin: boolean + `Optional`; Whether to opt the advocate into receiving emails. You must provide + values for the ``email`` and ``campaigns`` arguments. + sms_optout: boolean + `Optional`; Whether to opt the advocate out of receiving text messages. You must + provide values for the ``phone`` and ``campaigns`` arguments. Once an advocate is + opted out, they cannot be opted back in. + email_optout: boolean + `Optional`; Whether to opt the advocate out of receiving emails. You must + provide values for the ``email`` and ``campaigns`` arguments. Once an advocate is + opted out, they cannot be opted back in. + **kwargs: + Additional fields on the advocate to update + """ + + # Validate the passed in arguments + if (sms_optin or sms_optout) and not (phone and campaigns): + raise ValueError( + 'When opting an advocate in or out of SMS messages, you must specify a valid ' + 'phone and one or more campaigns') + + if (email_optin or email_optout) and not (email and campaigns): + raise ValueError( + 'When opting an advocate in or out of email messages, you must specify a valid ' + 'email address and one or more campaigns') + + # Align our arguments with the expected parameters for the API + payload = { + 'advocateid': advocate_id, + 'campaigns': campaigns, + 'email': email, + 'phone': phone, + 'smsOptin': 1 if sms_optin else None, + 'emailOptin': 1 if email_optin else None, + 'smsOptout': 1 if sms_optout else None, + 'emailOptout': 1 if email_optout else None, + # remap first_name / last_name to be consistent with updated_advocates + 'firstname': kwargs.pop('first_name', None), + 'lastname': kwargs.pop('last_name', None), + } + + # Clean up any keys that have a "None" value + payload = { + key: val + for key, val in payload.items() + if val is not None + } + + # Merge in any kwargs + payload.update(kwargs) + + # Turn into a list of items so we can append multiple campaigns + campaigns = campaigns or [] + campaign_keys = [('campaigns[]', val) for val in campaigns] + data = [(key, value) for key, value in payload.items()] + campaign_keys + + # Call into the Phone2Action API + self.client.post_request('advocates', data=data)
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/sftp/sftp.html b/docs/html/_modules/parsons/sftp/sftp.html
new file mode 100644
index 0000000000..8bda30d791
--- /dev/null
+++ b/docs/html/_modules/parsons/sftp/sftp.html
@@ -0,0 +1,477 @@
+ parsons.sftp.sftp — Parsons 0.5 documentation

Source code for parsons.sftp.sftp

+from contextlib import contextmanager
+import paramiko
+
+from parsons.utilities import files
+from parsons.etl import Table
+
+
+
[docs]class SFTP(object): + """ + Instantiate SFTP Class + + `Args:` + host: str + The host name + username: str + The user name + password: str + The password + rsa_private_key_file: str + Absolute path to a private RSA key used + to authenticate the sftp connection + port: int + Specify if different than the standard port 22 + `Returns:` + SFTP Class + """ + + def __init__(self, host, username, password, port=22, rsa_private_key_file=None): + self.host = host + if not self.host: + raise ValueError("Missing the SFTP host name") + + self.username = username + if not self.username: + raise ValueError("Missing the SFTP username") + + if not (password or rsa_private_key_file): + raise ValueError("Missing password or ssh authentication key") + + self.password = password + self.rsa_private_key_file = rsa_private_key_file + self.port = port
[docs] @contextmanager + def create_connection(self): + """ + Create an SFTP connection. You can then utilize this in a ``with`` block + and it will close the connection when it is out of scope. You should use + this when you wish to batch multiple methods using a single connection. + + .. code-block:: python + + from parsons import SFTP + + sftp = SFTP(host, username, password) + connection = sftp.create_connection() + + with connection as conn: + sftp.make_directory('my_dir', connection=conn) + sftp.put_file('my_csv.csv', 'my_dir/my_csv.csv', connection=conn) + + Returns: + SFTP Connection object + """ + + transport = paramiko.Transport((self.host, self.port)) + pkey = None + if self.rsa_private_key_file: + # we need to read it in + pkey = paramiko.RSAKey.from_private_key_file(self.rsa_private_key_file) + + transport.connect(username=self.username, password=self.password, pkey=pkey) + conn = paramiko.SFTPClient.from_transport(transport) + yield conn + conn.close() + transport.close()
+ +
[docs] def list_directory(self, remote_path='.', connection=None): + """ + List the contents of a directory + + `Args:` + remote_path: str + The remote path of the directory + connection: obj + An SFTP connection object + `Returns:` + list + """ + + if connection: + return connection.listdir(path=remote_path) + else: + with self.create_connection() as connection: + return connection.listdir(path=remote_path)
+ +
[docs] def make_directory(self, remote_path, connection=None): + """ + Makes a new directory on the SFTP server + + `Args:` + remote_path: str + The remote path of the directory + connection: obj + An SFTP connection object + """ + + if connection: + connection.mkdir(remote_path) + else: + with self.create_connection() as connection: + connection.mkdir(remote_path)
+ +
[docs] def remove_directory(self, remote_path, connection=None): + """ + Remove a directory from the SFTP server + + `Args:` + remote_path: str + The remote path of the directory + connection: obj + An SFTP connection object + """ + + if connection: + connection.rmdir(remote_path) + else: + with self.create_connection() as connection: + connection.rmdir(remote_path)
+ +
[docs] def get_file(self, remote_path, local_path=None, connection=None): + """ + Download a file from the SFTP server + + `Args:` + remote_path: str + The remote path of the file to download + local_path: str + The local path where the file will be downloaded. If not specified, a temporary + file will be created and returned, and that file will be removed automatically + when the script is done running. + connection: obj + An SFTP connection object + `Returns:` + str + The path of the local file + """ + + if not local_path: + local_path = files.create_temp_file_for_path(remote_path) + + if connection: + connection.get(remote_path, local_path) + else: + with self.create_connection() as connection: + connection.get(remote_path, local_path) + + return local_path
+ +
[docs] def get_table(self, remote_path, connection=None): + """ + Download a csv from the server and convert into a Parsons table. + + The file may be compressed with gzip, or zip, but may not contain + multiple files in the archive. + + `Args:` + remote_path: str + The remote path of the file to download + connection: obj + An SFTP connection object + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + if not files.valid_table_suffix(remote_path): + raise ValueError('File type cannot be converted to a Parsons table.') + + return Table.from_csv(self.get_file(remote_path, connection=connection))
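A short usage sketch (host, credentials, and the remote path are placeholders):

.. code-block:: python

    from parsons import SFTP

    sftp = SFTP('sftp.example.org', 'user', 'password')
    tbl = sftp.get_table('exports/people.csv')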
+ +
[docs] def put_file(self, local_path, remote_path, connection=None): + """ + Put a file on the SFTP server + `Args:` + local_path: str + The local path of the source file + remote_path: str + The remote path of the new file + connection: obj + An SFTP connection object + """ + if connection: + connection.put(local_path, remote_path) + else: + with self.create_connection() as connection: + connection.put(local_path, remote_path)
+ +
[docs] def remove_file(self, remote_path, connection=None): + """ + Delete a file on the SFTP server + + `Args:` + remote_path: str + The remote path of the file + connection: obj + An SFTP connection object + """ + + if connection: + connection.remove(remote_path) + else: + with self.create_connection() as connection: + connection.remove(remote_path)
+ +
[docs] def get_file_size(self, remote_path, connection=None): + """ + Get the size of a file in MB on the SFTP server. The file is + not downloaded locally. + + `Args:` + remote_path: str + The remote path of the file + connection: obj + An SFTP connection object + `Returns:` + int + The file size in MB. + """ + + if connection: + size = connection.file(remote_path, 'r')._get_size() + else: + with self.create_connection() as connection: + size = connection.file(remote_path, 'r')._get_size() + + # _get_size() returns bytes, so divide by 1024 twice to get MB + return size / 1024 / 1024
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/targetsmart/targetsmart.html b/docs/html/_modules/parsons/targetsmart/targetsmart.html
new file mode 100644
index 0000000000..a0dfc9dfc8
--- /dev/null
+++ b/docs/html/_modules/parsons/targetsmart/targetsmart.html
@@ -0,0 +1,558 @@
+ parsons.targetsmart.targetsmart — Parsons 0.1 documentation

Source code for parsons.targetsmart.targetsmart

+import requests
+import os
+import petl
+from parsons.etl.table import Table
+
+
+class TargetSmartConnector(object):
+
+    def __init__(self, api_key=None, uri='https://api.targetsmart.com/'):
+
+        if api_key is None:
+
+            try:
+                api_key = os.environ['TS_API_KEY']
+            except KeyError:
+                raise KeyError('No TargetSmart API key found. Please store'
+                               ' it in an environment variable or pass it as'
+                               ' an argument.')
+
+        self.uri = uri
+        self.api_key = api_key
+        self.headers = {'x-api-key': self.api_key}
+
+    def request(self, url, args=None, raw=False):
+
+        r = requests.get(url, headers=self.headers, params=args)
+
+        # Return the raw json for responses that need additional munging.
+        if raw:
+
+            return r.json()
+
+        return Table(r.json()['output'])
+
+
+class Person(object):
+
+    def __init__(self):
+
+        return None
+
+    def data_enhance(self, search_id, search_id_type='voterbase', state=None):
+        """
+        Searches for a record based on an id or phone or email address
+
+        `Args:`
+            search_id: str
+                The primary key or email address or phone number
+            search_id_type: str
+                One of ``voterbase``, ``exacttrack``, ``abilitec_consumer_link``, ``phone``,
+                ``email``, ``smartvan``, ``votebuilder``, ``voter``, ``household``.
+            state: str
+                Two character state code. Required if ``search_id_type`` of ``smartvan``,
+                ``votebuilder`` or ``voter``.
+        `Returns`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        if search_id_type in ['smartvan', 'votebuilder', 'voter'] and state is None:
+
+            raise KeyError("Search ID type '{}' requires state kwarg".format(search_id_type))
+
+        if search_id_type not in ('voterbase', 'exacttrack', 'abilitec_consumer_link', 'phone',
+                                  'email', 'smartvan', 'votebuilder', 'voter', 'household'):
+
+            raise ValueError('Search_id_type is not valid')
+
+        url = self.connection.uri + 'person/data-enhance'
+
+        args = {'search_id': search_id,
+                'search_id_type': search_id_type,
+                'state': state
+                }
+
+        return self.connection.request(url, args=args)
+
+    def radius_search(self, first_name, last_name, middle_name=None, name_suffix=None,
+                      latitude=None, longitude=None, address=None, address_type='reg',
+                      radius_size=10, radius_unit='miles', max_results=10, gender='a',
+                      age_min=None, age_max=None, composite_score_min=1, composite_score_max=100,
+                      last_name_exact=True, last_name_is_prefix=False, last_name_prefix_length=10):
+        """
+        Search for a person based on a specified radius
+
+        `Args`:
+            first_name: str
+                One or more alpha characters
+            last_name: str
+                One or more alpha characters
+            middle_name: str
+                One or more alpha characters
+            name_suffix: str
+                One or more alpha characters
+            latitude: float
+                Floating point number (e.g. 33.738987255507)
+            longitude: float
+                Floating point number (e.g. -116.40833849559)
+            address: str
+                Any geocode-able address
+            address_type: str
+                ``reg`` for registration (default) or ``tsmart`` for TargetSmart
+            radius_size: int
+                The size of the search radius. Default of ``10``.
+            radius_unit: str
+                One of ``meters``, ``feet``, ``miles`` (default), or ``kilometers``.
+            max_results: int
+                Default of ``10``. An integer in range [0 - 100]
+            gender: str
+                Default of ``a``. One of ``m``, ``f``, ``u``, ``a``.
+            age_min: int
+                A positive integer
+            age_max: int
+                A positive integer
+            composite_score_min: int
+                An integer in range [1 - 100]. Filter out results with composite score
+                less than this value.
+            composite_score_max: int
+                An integer in range [1 - 100]. Filter out results with composite score
+                greater than this value.
+            last_name_exact: boolean
+                By default, the full last name is used for finding matches if the length of the
+                last name is not longer than 10 characters. As an example, “anders” is less likely
+                to match to “anderson” with this enabled. Disable this option if you are using
+                either ``last_name_is_prefix`` or ``last_name_prefix_length``.
+            last_name_is_prefix: boolean
+                By default, the full last name is used for finding matches. Enable this parameter
+                if your search last name is truncated. This can be common for some client
+                applications that for various reasons do not have full last names. Use this
+                parameter along with ``last_name_prefix_length`` to configure the length of the last
+                name prefix. This parameter is ignored if ``last_name_exact`` is enabled.
+            last_name_prefix_length: int
+                By default, up to the first 10 characters of the search last name are used for
+                finding relative matches. This value must be between 3 and 10. This parameter is
+                ignored if last_name_exact is enabled.
+        `Returns`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        if (latitude is None or longitude is None) and address is None:
+            raise ValueError('Lat/Long or Address required')
+
+        # Convert booleans to strings, as expected by the API
+        last_name_exact = str(last_name_exact)
+        last_name_is_prefix = str(last_name_is_prefix)
+
+        url = self.connection.uri + 'person/radius-search'
+
+        args = {'first_name': first_name,
+                'last_name': last_name,
+                'middle_name': middle_name,
+                'name_suffix': name_suffix,
+                'latitude': latitude,
+                'longitude': longitude,
+                'address': address,
+                'radius_size': radius_size,
+                'radius_unit': radius_unit,
+                'max_results': max_results,
+                'gender': gender,
+                'age_min': age_min,
+                'age_max': age_max,
+                'composite_score_min': composite_score_min,
+                'composite_score_max': composite_score_max,
+                'last_name_exact': last_name_exact,
+                'last_name_is_prefix': last_name_is_prefix,
+                'last_name_prefix_length': last_name_prefix_length
+                }
+
+        r = self.connection.request(url, args=args, raw=True)
+        return Table([itm for itm in r['output']]).unpack_dict('data_fields', prepend=False)
+
+    def phone(self, table):
+        """
+        Match based on a list of phone numbers. The table
+        can contain up to 500 phone numbers to match.
+
+        `Args:`
+            table: parsons table
+                See :ref:`parsons-table`. One row per phone number,
+                up to 500 phone numbers.
+        `Returns:`
+            See :ref:`parsons-table` for output options.
+        """
+
+        url = self.connection.uri + 'person/phone-search'
+
+        args = {'phones': list(petl.values(table.table, 0))}
+
+        return Table(self.connection.request(url, args=args, raw=True)['result'])
+
+
+class Service(object):
+
+    def __init__(self):
+
+        return None
+
+    def district(self, search_type='zip', address=None, zip5=None, zip4=None, state=None,
+                 latitude=None, longitude=None):
+        """
+        Return district information based on a geographic point. The method allows you to
+        search based on the following:
+
+        .. list-table::
+            :widths: 30 30 30
+            :header-rows: 1
+
+            * - Search Type
+              - Search Type Name
+              - Required kwarg(s)
+            * - Zip Code
+              - ``zip``
+              - ``zip5``, ``zip4``
+            * - Address
+              - ``address``
+              - ``address``
+            * - Point
+              - ``point``
+              - ``latitude``, ``longitude``
+
+        `Args`:
+            search_type: str
+                The type of district search to perform. One of ``zip``, ``address``
+                or ``point``.
+            address: str
+                An unparsed full address
+            zip5: str
+                The USPS Zip5 code
+            zip4: str
+                The USPS Zip4 code
+            state: str
+                The two character state code
+            latitude: float or str
+                Valid latitude floating point
+            longitude: float or str
+                Valid longitude floating point
+        `Returns`:
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        if search_type == 'zip' and None in [zip5, zip4]:
+            raise ValueError("Search type 'zip' requires 'zip5' and 'zip4' arguments")
+
+        elif search_type == 'point' and None in [latitude, longitude]:
+            raise ValueError("Search type 'point' requires 'latitude' and 'longitude' arguments")
+
+        elif search_type == 'address' and None in [address]:
+            raise ValueError("Search type 'address' requires 'address' argument")
+
+        elif search_type not in ['zip', 'point', 'address']:
+            raise KeyError("Invalid 'search_type' provided.")
+
+        else:
+            pass
+
+        url = self.connection.uri + 'service/district'
+
+        args = {'search_type': search_type,
+                'address': address,
+                'zip5': zip5,
+                'zip4': zip4,
+                'state': state,
+                'latitude': latitude,
+                'longitude': longitude
+                }
+
+        return Table([self.connection.request(url, args=args, raw=True)['match_data']])
+
+
+class Voter(object):
+
+    def __init__(self, connection):
+
+        self.connection = connection
+
+    def voter_registration_check(self, first_name=None, last_name=None,
+                                 state=None, street_number=None,
+                                 street_name=None, city=None, zip_code=None,
+                                 age=None, dob=None, phone=None, email=None,
+                                 unparsed_full_address=None,
+                                 obj_type="dict"):
+        """
+        Searches for a registered individual, returns matches.
+
+        A search must include, at a minimum, first name, last name and state.
+
+        `Args:`
+            first_name: str
+                Required; One or more alpha characters. Trailing wildcard allowed
+            last_name: str
+                Required; One or more alpha characters. Trailing wildcard allowed
+            state: str
+                Required; Two character state code (e.g. ``NY``)
+            street_number: str
+                Optional; One or more alpha characters. Trailing wildcard allowed
+            street_name: str
+                Optional; One or more alpha characters. Trailing wildcard allowed
+            city: str
+                Optional; The person's home city
+            zip_code: str
+                Optional; Numeric characters. Trailing wildcard allowed
+            age: int
+                Optional; One or more integers. Trailing wildcard allowed
+            dob: str
+                Numeric characters in YYYYMMDD format. Trailing wildcard allowed
+            phone: str
+                Integer followed by 0 or more * or integers
+            email: str
+                Alphanumeric character followed by 0 or more * or legal characters
+                (alphanumeric, @, -, .)
+            unparsed_full_address: str
+                One or more alphanumeric characters. No wildcards.
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        url = self.connection.uri + 'voter/voter-registration-check'
+
+        if None in [first_name, last_name, state]:
+            raise ValueError("Search must include at least first_name, last_name, and state.")
+
+        args = {'first_name': first_name,
+                'last_name': last_name,
+                'state': state,
+                'street_number': street_number,
+                'street_name': street_name,
+                'city': city,
+                'zip_code': zip_code,
+                'age': age,
+                'dob': dob,
+                'phone': phone,
+                'email': email,
+                'unparsed_full_address': unparsed_full_address
+                }
+
+        return self.connection.request(url, args=args, raw=True)
+
+
+
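+# TargetSmart composes the Voter, Person, and Service mixins into a single
+# client. A minimal sketch (hypothetical person; assumes TS_API_KEY is set):
+#
+#   ts = TargetSmart()
+#   result = ts.voter_registration_check(first_name='Lucy', last_name='Parsons',
+#                                        state='IL')
+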
+class TargetSmart(Voter, Person, Service):
+
+    def __init__(self, api_key=None):
+
+        self.connection = TargetSmartConnector(api_key=api_key)
+
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/targetsmart/targetsmart_api.html b/docs/html/_modules/parsons/targetsmart/targetsmart_api.html
new file mode 100644
index 0000000000..ae17c88a45
--- /dev/null
+++ b/docs/html/_modules/parsons/targetsmart/targetsmart_api.html
@@ -0,0 +1,579 @@
+parsons.targetsmart.targetsmart_api — Parsons 0.5 documentation

Source code for parsons.targetsmart.targetsmart_api

+import requests
+import petl
+from parsons.etl.table import Table
+from parsons.utilities import check_env
+
+URI = 'https://api.targetsmart.com/'
+
+
+class TargetSmartConnector(object):
+
+    def __init__(self, api_key):
+        self.uri = URI
+        self.api_key = check_env.check('TS_API_KEY', api_key)
+        self.headers = {'x-api-key': self.api_key}
+
+    def request(self, url, args=None, raw=False):
+
+        r = requests.get(url, headers=self.headers, params=args)
+
+        # Return the raw JSON for responses that need additional munging.
+        if raw:
+            return r.json()
+
+        return Table(r.json()['output'])
+
+
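+# TargetSmartConnector is shared by the endpoint classes below: it issues the
+# GET request and, unless raw=True, wraps the response's 'output' key in a
+# Parsons Table. A sketch (hypothetical key and arguments):
+#
+#   conn = TargetSmartConnector(api_key='MY_KEY')
+#   tbl = conn.request(conn.uri + 'person/data-enhance',
+#                      args={'search_id': '12345', 'search_id_type': 'voterbase'})
+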
+class Person(object):
+
+    def __init__(self):
+
+        pass
+
+    def data_enhance(self, search_id, search_id_type='voterbase', state=None):
+        """
+        Searches for a record based on an id or phone or email address
+
+        `Args:`
+            search_id: str
+                The primary key or email address or phone number
+            search_id_type: str
+                One of ``voterbase``, ``exacttrack``, ``abilitec_consumer_link``, ``phone``,
+                ``email``, ``smartvan``, ``votebuilder``, ``voter``, ``household``.
+            state: str
+                Two character state code. Required if ``search_id_type`` of ``smartvan``,
+                ``votebuilder`` or ``voter``.
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        if search_id_type in ['smartvan', 'votebuilder', 'voter'] and state is None:
+
+            raise KeyError("Search ID type '{}' requires state kwarg".format(search_id_type))
+
+        if search_id_type not in ('voterbase', 'exacttrack', 'abilitec_consumer_link', 'phone',
+                                  'email', 'smartvan', 'votebuilder', 'voter', 'household'):
+
+            raise ValueError('Search_id_type is not valid')
+
+        url = self.connection.uri + 'person/data-enhance'
+
+        args = {'search_id': search_id,
+                'search_id_type': search_id_type,
+                'state': state
+                }
+
+        return self.connection.request(url, args=args)
+
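+    # A minimal data_enhance sketch (hypothetical id; assumes TS_API_KEY is set):
+    #
+    #   ts = TargetSmartAPI()
+    #   enhanced = ts.data_enhance('12345', search_id_type='voterbase')
+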
+    def radius_search(self, first_name, last_name, middle_name=None, name_suffix=None,
+                      latitude=None, longitude=None, address=None, radius_size=10,
+                      radius_unit='miles', max_results=10, gender='a', age_min=None, age_max=None,
+                      composite_score_min=1, composite_score_max=100, last_name_exact=True,
+                      last_name_is_prefix=False, last_name_prefix_length=10):
+        """
+        Search for a person based on a specified radius
+
+        `Args:`
+            first_name: str
+                One or more alpha characters
+            last_name: str
+                One or more alpha characters
+            middle_name: str
+                One or more alpha characters
+            name_suffix: str
+                One or more alpha characters
+            latitude: float
+                Floating point number (e.g. 33.738987255507)
+            longitude: float
+                Floating point number (e.g. -116.40833849559)
+            address: str
+                Any geocode-able address
+            radius_size: int
+                The size of the search radius, in units given by ``radius_unit``.
+                Defaults to ``10``.
+            radius_unit: str
+                One of ``meters``, ``feet``, ``miles`` (default), or ``kilometers``.
+            max_results: int
+                Default of ``10``. An integer in range [0 - 100]
+            gender: str
+                Default of ``a``. One of ``m``, ``f``, ``u``, ``a``.
+            age_min: int
+                A positive integer
+            age_max: int
+                A positive integer
+            composite_score_min: int
+                An integer in range [1 - 100]. Filter out results with composite score
+                less than this value.
+            composite_score_max: int
+                An integer in range [1 - 100]. Filter out results with composite score
+                greater than this value.
+            last_name_exact: boolean
+                By default, the full last name is used for finding matches if the length of the
+                last name is not longer than 10 characters. As an example, “anders” is less likely
+                to match to “anderson” with this enabled. Disable this option if you are using
+                either ``last_name_is_prefix`` or ``last_name_prefix_length``.
+            last_name_is_prefix: boolean
+                By default, the full last name is used for finding matches. Enable this parameter
+                if your search last name is truncated. This can be common for some client
+                applications that for various reasons do not have full last names. Use this
+                parameter along with ``last_name_prefix_length`` to configure the length of the last
+                name prefix. This parameter is ignored if ``last_name_exact`` is enabled.
+            last_name_prefix_length: int
+                By default, up to the first 10 characters of the search last name are used for
+                finding relative matches. This value must be between 3 and 10. This parameter is
+                ignored if ``last_name_exact`` is enabled.
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        if (latitude is None or longitude is None) and address is None:
+            raise ValueError('Lat/Long or Address required')
+
+        # Convert booleans to strings; rebinding the loop variable would not
+        # modify the original arguments
+        last_name_exact = str(last_name_exact)
+        last_name_is_prefix = str(last_name_is_prefix)
+
+        url = self.connection.uri + 'person/radius-search'
+
+        args = {'first_name': first_name,
+                'last_name': last_name,
+                'middle_name': middle_name,
+                'name_suffix': name_suffix,
+                'latitude': latitude,
+                'longitude': longitude,
+                'address': address,
+                'radius_size': radius_size,
+                'radius_unit': radius_unit,
+                'max_results': max_results,
+                'gender': gender,
+                'age_min': age_min,
+                'age_max': age_max,
+                'composite_score_min': composite_score_min,
+                'composite_score_max': composite_score_max,
+                'last_name_exact': last_name_exact,
+                'last_name_is_prefix': last_name_is_prefix,
+                'last_name_prefix_length': last_name_prefix_length
+                }
+
+        r = self.connection.request(url, args=args, raw=True)
+        return Table(r['output']).unpack_dict('data_fields', prepend=False)
+
+    def phone(self, table):
+        """
+        Match based on a list of phone numbers. The table may
+        contain up to 500 phone numbers to match.
+
+        `Args:`
+            table: parsons table
+                See :ref:`parsons-table`. One row per phone number,
+                up to 500 phone numbers.
+        `Returns:`
+            See :ref:`parsons-table` for output options.
+        """
+
+        url = self.connection.uri + 'person/phone-search'
+
+        args = {'phones': list(petl.values(table.table, 0))}
+
+        return Table(self.connection.request(url, args=args, raw=True)['result'])
+
+
+class Service(object):
+
+    def __init__(self):
+
+        pass
+
+    def district(self, search_type='zip', address=None, zip5=None, zip4=None, state=None,
+                 latitude=None, longitude=None):
+        """
+        Return district information for a given location. The method allows you to
+        search based on the following:
+
+        .. list-table::
+            :widths: 30 30 30
+            :header-rows: 1
+
+            * - Search Type
+              - Search Type Name
+              - Required kwarg(s)
+            * - Zip Code
+              - ``zip``
+              - ``zip5``, ``zip4``
+            * - Address
+              - ``address``
+              - ``address``
+            * - Point
+              - ``point``
+              - ``latitude``, ``longitude``
+
+        `Args:`
+            search_type: str
+                The type of district search to perform. One of ``zip``, ``address``
+                or ``point``.
+            address: str
+                An unparsed full address
+            zip5: str
+                The USPS Zip5 code
+            zip4: str
+                The USPS Zip4 code
+            state: str
+                The two character state code
+            latitude: float or str
+                Valid latitude floating point
+            longitude: float or str
+                Valid longitude floating point
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        if search_type == 'zip' and None in [zip5, zip4]:
+            raise ValueError("Search type 'zip' requires 'zip5' and 'zip4' arguments")
+
+        elif search_type == 'point' and None in [latitude, longitude]:
+            raise ValueError("Search type 'point' requires 'latitude' and 'longitude' arguments")
+
+        elif search_type == 'address' and None in [address]:
+            raise ValueError("Search type 'address' requires 'address' argument")
+
+        elif search_type not in ['zip', 'point', 'address']:
+            raise KeyError("Invalid 'search_type' provided.")
+
+        url = self.connection.uri + 'service/district'
+
+        args = {'search_type': search_type,
+                'address': address,
+                'zip5': zip5,
+                'zip4': zip4,
+                'state': state,
+                'latitude': latitude,
+                'longitude': longitude
+                }
+
+        return Table([self.connection.request(url, args=args, raw=True)['match_data']])
+
+
+class Voter(object):
+
+    def __init__(self, connection):
+
+        self.connection = connection
+
+    def voter_registration_check(self, first_name=None, last_name=None,
+                                 state=None, street_number=None,
+                                 street_name=None, city=None, zip_code=None,
+                                 age=None, dob=None, phone=None, email=None,
+                                 unparsed_full_address=None):
+        """
+        Searches for a registered individual, returns matches.
+
+        A search must include, at a minimum, first name, last name and state.
+
+        `Args:`
+            first_name: str
+                Required; One or more alpha characters. Trailing wildcard allowed
+            last_name: str
+                Required; One or more alpha characters. Trailing wildcard allowed
+            state: str
+                Required; Two character state code (e.g. ``NY``)
+            street_number: str
+                Optional; One or more alpha characters. Trailing wildcard allowed
+            street_name: str
+                Optional; One or more alpha characters. Trailing wildcard allowed
+            city: str
+                Optional; The person's home city
+            zip_code: str
+                Optional; Numeric characters. Trailing wildcard allowed
+            age: int
+                Optional; One or more integers. Trailing wildcard allowed
+            dob: str
+                Numeric characters in YYYYMMDD format. Trailing wildcard allowed
+            phone: str
+                Integer followed by 0 or more * or integers
+            email: str
+                Alphanumeric character followed by 0 or more * or legal characters
+                (alphanumeric, @, -, .)
+            unparsed_full_address: str
+                One or more alphanumeric characters. No wildcards.
+        `Returns:`
+            Parsons Table
+                See :ref:`parsons-table` for output options.
+        """
+
+        url = self.connection.uri + 'voter/voter-registration-check'
+
+        if None in [first_name, last_name, state]:
+            raise ValueError("Search must include at least first_name, last_name, and state.")
+
+        args = {'first_name': first_name,
+                'last_name': last_name,
+                'state': state,
+                'street_number': street_number,
+                'street_name': street_name,
+                'city': city,
+                'zip_code': zip_code,
+                'age': age,
+                'dob': dob,
+                'phone': phone,
+                'email': email,
+                'unparsed_full_address': unparsed_full_address
+                }
+
+        return self.connection.request(url, args=args, raw=True)
+
+
+
+class TargetSmartAPI(Voter, Person, Service):
+
+    def __init__(self, api_key=None):
+
+        self.connection = TargetSmartConnector(api_key=api_key)
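+
+# A minimal radius-search sketch (hypothetical name and address; assumes
+# TS_API_KEY is set in the environment):
+#
+#   ts = TargetSmartAPI()
+#   nearby = ts.radius_search('Lucy', 'Parsons',
+#                             address='100 W Washington, Chicago, IL',
+#                             radius_size=5, radius_unit='miles')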
+
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/targetsmart/targetsmart_automation.html b/docs/html/_modules/parsons/targetsmart/targetsmart_automation.html
new file mode 100644
index 0000000000..04074c1df1
--- /dev/null
+++ b/docs/html/_modules/parsons/targetsmart/targetsmart_automation.html
@@ -0,0 +1,444 @@
+parsons.targetsmart.targetsmart_automation — Parsons 0.5 documentation

Source code for parsons.targetsmart.targetsmart_automation

+from parsons.sftp.sftp import SFTP
+from parsons.etl.table import Table
+from parsons.utilities.files import create_temp_file
+from parsons.utilities import check_env
+import xml.etree.ElementTree as ET
+import uuid
+import time
+import logging
+import xmltodict
+
+
+TS_SFTP_HOST = 'sftp.targetsmart.com'
+TS_SFTP_PORT = 2222
+TS_SFTP_DIR = 'automation'
+
+logger = logging.getLogger(__name__)
+
+# Automation matching documentation can be found here:
+# https://docs.targetsmart.com/developers/automation/index.html
+
+# The columns are heavily customized by TargetSmart, so additional column
+# validation and mapping is not currently feasible.
+
+
+
+class TargetSmartAutomation(object):
+
+    def __init__(self, sftp_username=None, sftp_password=None):
+
+        self.sftp_host = TS_SFTP_HOST
+        self.sftp_port = TS_SFTP_PORT
+        self.sftp_dir = TS_SFTP_DIR
+        self.sftp_username = check_env.check('TS_SFTP_USERNAME', sftp_username)
+        self.sftp_password = check_env.check('TS_SFTP_PASSWORD', sftp_password)
+        self.sftp = SFTP(self.sftp_host, self.sftp_username, self.sftp_password, self.sftp_port)
+
+    def match(self, table, job_type, job_name=None, emails=None, call_back=None,
+              remove_files=True):
+        """
+        Match a table to TargetSmart using their bulk matching service.
+
+        .. warning::
+            Table Columns
+            The automation job validates the file by column indexes
+            rather than column names. So, if it expects 10 columns and you
+            only provide 9, it will fail. However, if you provide 10 columns that
+            are out of order, the job will succeed, but the records will not
+            match.
+
+        Args:
+            table: Parsons Table Object
+                A table object with the required columns. (Required columns provided by TargetSmart)
+            job_type: str
+                The match job type. **This is case sensitive.** (Match job names provided by TargetSmart)
+            job_name: str
+                Optional job name.
+            emails: list
+                A list of emails that will receive status notifications. This
+                is useful in debugging failed jobs.
+            call_back: str
+                A callback url to which the status will be posted. See
+                `TargetSmart documentation <https://docs.targetsmart.com/developers/automation/index.html#http-callback>`_
+                for more details.
+            remove_files: boolean
+                Remove the configuration, file to be matched and matched file from
+                the TargetSmart FTP upon completion or failure of match.
+        """  # noqa: E501,E261
+
+        # Generate a match job
+        job_name = job_name or str(uuid.uuid1())
+
+        try:
+            # Upload table
+            self.sftp.put_file(table.to_csv(), f'{self.sftp_dir}/{job_name}_input.csv')
+            logger.info(f'Table with {table.num_rows} rows uploaded to TargetSmart.')
+
+            # Create/upload XML configuration
+            xml = self.create_job_xml(job_type, job_name, emails=emails,
+                                      status_key=job_name, call_back=call_back)
+            self.sftp.put_file(xml, f'{self.sftp_dir}/{job_name}.job.xml')
+            logger.info('Match configuration uploaded to TargetSmart.')
+
+            # Check xml configuration status
+            self.poll_config_status(job_name)
+
+            # Check the status of the match
+            self.match_status(job_name)
+
+            # Download the resulting file
+            tbl = Table.from_csv(self.sftp.get_file(f'{self.sftp_dir}/{job_name}_output.csv'))
+
+        finally:
+            # Clean up files
+            if remove_files:
+                self.remove_files(job_name)
+
+        # Log Stats
+        # TO DO: Provide some stats on the match
+
+        # Return file as a Table
+        return tbl
+
+    def create_job_xml(self, job_type, job_name, emails=None, status_key=None, call_back=None):
+        # Internal method to create a valid job xml
+
+        job = ET.Element("job")
+
+        # Generate Base XML
+        input_file = ET.SubElement(job, 'inputfile')
+        input_file.text = job_name + '_input.csv'
+        output_file = ET.SubElement(job, 'outputfile')
+        output_file.text = job_name + '_output.csv'
+        jobtype = ET.SubElement(job, 'jobtype')
+        jobtype.text = job_type
+
+        # Add status key
+        args = ET.SubElement(job, "args")
+        statuskey = ET.SubElement(args, "arg", name="__status_key")
+        statuskey.text = status_key or job_name
+
+        # Optional args
+        if call_back:
+            callback = ET.SubElement(args, "arg", name="__http_callback")
+            callback.text = call_back
+
+        if emails:
+            emails_el = ET.SubElement(args, "arg", name="__emails")
+            emails_el.text = ','.join(emails)
+
+        # Write xml to a temp file and return its path
+        local_path = create_temp_file(suffix='.xml')
+        tree = ET.ElementTree(job)
+        tree.write(local_path)
+        return local_path
+
+    def poll_config_status(self, job_name, polling_interval=20):
+        # Poll the configuration status until the job is configured
+
+        while True:
+
+            time.sleep(polling_interval)
+            if self.config_status(job_name):
+                return True
+            logger.info(f'Waiting on {job_name} job configuration...')
+
+    def config_status(self, job_name):
+        # Check the status of the configuration by parsing the
+        # files in the SFTP directory.
+
+        for f in self.sftp.list_directory(remote_path=self.sftp_dir):
+
+            if f == f'{job_name}.job.xml.good':
+                logger.info(f'Match job {job_name} configured.')
+                return True
+
+            elif f == f'{job_name}.job.xml.bad':
+                logger.info(f'Match job {job_name} configuration error.')
+                # To Do: Lift up the configuration error.
+                raise ValueError('Job configuration failed. If you provided an email '
+                                 'address, you will be sent more details.')
+
+        return False
+
+    def match_status(self, job_name, polling_interval=60):
+        # You could also poll their API for the status, which was what the original
+        # version of the automation matching did. Note: The polling API is public
+        # and does expose some metadata. This happens regardless of anything that
+        # we do. However, the actual data is only exposed on the secure SFTP.
+
+        while True:
+
+            logger.debug('Match running...')
+            for file_name in self.sftp.list_directory(remote_path=self.sftp_dir):
+
+                if file_name == f'{job_name}.finish.xml':
+
+                    xml_file = self.sftp.get_file(f'{self.sftp_dir}/{job_name}.finish.xml')
+                    with open(xml_file, 'rb') as x:
+                        xml = xmltodict.parse(x, dict_constructor=dict)
+
+                    if xml['jobcontext']['state'] == 'error':
+                        # To Do: Parse these in a pretty way
+                        logger.info(f"Match Error: {xml['jobcontext']['errors']}")
+                        raise ValueError(f"Match job failed. {xml['jobcontext']['errors']}")
+
+                    elif xml['jobcontext']['state'] == 'success':
+                        logger.info('Match complete.')
+
+                    return True
+
+            time.sleep(polling_interval)
+
+    def remove_files(self, job_name):
+        # Remove all of the files for the match.
+
+        for file_name in self.sftp.list_directory(remote_path=self.sftp_dir):
+            if job_name in file_name:
+                self.sftp.remove_file(f'{self.sftp_dir}/{file_name}')
+                logger.info(f'{file_name} removed from SFTP.')
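+
+# A minimal automation-match sketch (hypothetical job type and file; assumes
+# TS_SFTP_USERNAME and TS_SFTP_PASSWORD are set in the environment):
+#
+#   from parsons import Table
+#   auto = TargetSmartAutomation()
+#   matched = auto.match(Table.from_csv('voters.csv'), 'job-type-from-targetsmart')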
+
\ No newline at end of file
diff --git a/docs/html/_modules/parsons/turbovote/turbovote.html b/docs/html/_modules/parsons/turbovote/turbovote.html
new file mode 100644
index 0000000000..880236b380
--- /dev/null
+++ b/docs/html/_modules/parsons/turbovote/turbovote.html
@@ -0,0 +1,309 @@
+parsons.turbovote.turbovote — Parsons 0.5 documentation

Source code for parsons.turbovote.turbovote

+from parsons.etl import Table
+import requests
+import logging
+from parsons.utilities import check_env
+
+logger = logging.getLogger(__name__)
+
+TURBOVOTE_URI = 'https://turbovote-admin-http-api.prod.democracy.works/'
+
+
+
[docs]class TurboVote(object): + """ + Instantiate the TurboVote class + + `Args:` + username: str + A valid TurboVote username. Not required if ``TURBOVOTE_USERNAME`` + env variable set. + password: str + A valid TurboVote password. Not required if ``TURBOVOTE_PASSWORD`` + env variable set. + subdomain: str + Your TurboVote subdomain (i.e. ``https://MYORG.turbovote.org``). Not + required if ``TURBOVOTE_SUBDOMAIN`` env variable set. + `Returns:` + class + """ + + def __init__(self, username=None, password=None, subdomain=None): + + self.username = check_env.check('TURBOVOTE_USERNAME', username) + self.password = check_env.check('TURBOVOTE_PASSWORD', password) + self.subdomain = check_env.check('TURBOVOTE_SUBDOMAIN', subdomain) + self.uri = TURBOVOTE_URI + + def _get_token(self): + # Retrieve a temporary bearer token to access API + + url = self.uri + 'login' + payload = {'username': self.username, + 'password': self.password} + r = requests.post(url, data=payload) + logger.debug(r.url) + r.raise_for_status() + + return r.json()['id-token'] + +
[docs] def get_users(self): + """ + Get users. + + `Returns:` + Parsons Table + See :ref:`parsons-table` for output options. + """ + + url = self.uri + f'partners/{self.subdomain}.turbovote.org/users' + + headers = {"Authorization": f"Bearer {self._get_token()}"} + r = requests.get(url, headers=headers) + logger.debug(r) + r.raise_for_status() + tbl = Table.from_csv_string(r.text) + logger.info(f'{tbl.num_rows} users retrieved.') + + return tbl
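+
+# A minimal usage sketch (assumes TURBOVOTE_USERNAME, TURBOVOTE_PASSWORD, and
+# TURBOVOTE_SUBDOMAIN are set in the environment):
+#
+#   tv = TurboVote()
+#   users = tv.get_users()
+#   users.to_csv('turbovote_users.csv')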
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/_sources/action_kit.rst.txt b/docs/html/_sources/action_kit.rst.txt new file mode 100644 index 0000000000..8fbf46632c --- /dev/null +++ b/docs/html/_sources/action_kit.rst.txt @@ -0,0 +1,61 @@ +ActionKit +========= + +******** +Overview +******** + +`ActionKit `_ is a platform for advocacy, fundraising, and +get-out-the-vote. This Parsons integration with the +`ActionKit REST API `_ +supports fetching, creating, and updating records of campaigns, events, and users. +Bulk upload of new users and user updates is also supported. + +.. note:: + Authentication + ActionKit requires `HTTP Basic Auth `_. + Clients with an ActionKit account can obtain the domain, username, and password needed + to access the ActionKit API. See the `ActionKit REST API Authentication `_ + documentation for more information on obtaining ActionKit API credentials. + +********** +Quickstart +********** + +To instantiate the ActionKit class, you can either store your ActionKit API +domain, username, and password as environmental variables (``ACTION_KIT_DOMAIN``, +``ACTION_KIT_USERNAME``, and ``ACTION_KIT_PASSWORD``, respectively) or pass in your +domain, username, and password as arguments: + +.. code-block:: python + + from parsons import ActionKit + + # First approach: Use API credentials via environmental variables + ak = ActionKit() + + # Second approach: Pass API credentials as arguments + ak = ActionKit(domain='myorg.actionkit.com', username='my_name', password='1234') + +You can then call various endpoints: + +.. code-block:: python + + # Create a new user + ak.create_user(email='john@email', first_name='John', last_name='Smith', city='Boston') + + # Fetch user fields + user_fields = ak.get_user(user_id='123') + + # Update user fields + ak.update_user(user_id='123', city='New York') + + # Delete uer + ak.delete_user(user_id='123') + +*** +API +*** + +.. autoclass :: parsons.ActionKit + :inherited-members: diff --git a/docs/html/_sources/aws.rst.txt b/docs/html/_sources/aws.rst.txt new file mode 100644 index 0000000000..2700d31b12 --- /dev/null +++ b/docs/html/_sources/aws.rst.txt @@ -0,0 +1,167 @@ +Amazon Web Services +=================== + +****** +Lambda +****** + +=== +API +=== + +.. autofunction :: parsons.aws.distribute_task +.. autofunction :: parsons.aws.event_command + +*** +S3 +*** + +======== +Overview +======== + +S3 is Amazon Web Service's object storage service that allows users to store and access data objects. The Parson's class is a high level wrapper of the AWS SDK `boto3 `_. It allows users to upload and download files from S3 as well as manipulate buckets. + +.. note:: + Authentication + Access to S3 is controlled through AWS Identity and Access Management (IAM) users in the `AWS Managerment Console `_ . Users can be granted granular access to AWS resources, including S3. IAM users are provisioned keys, which are required to access the S3 class. + +========== +QuickStart +========== + +Instantiate class with credentials. + +.. code-block:: python + + from parsons import S3 + + # First approach: Use API credentials via environmental variables + s3 = S3() + + # Second approach: Pass API credentials as arguments + s3 = S3(aws_access_key_id='MY_KEY', aws_secret_access_key='MY_SECRET') + + # Third approach: Use credentials stored in AWS CLI file ~/.aws/credentials + s3 = S3() + +You can then call various endpoints: + +.. 
code-block:: python + + from parsons import S3, Table + + s3 = S3(aws_access_key_id='MY_KEY', aws_secret_access_key='MY_SECRET') + + # Put an arbitrary file in an S3 bucket + with open('winning_formula.csv') as w: + s3.put_file('my_bucket', 'winning.csv, w) + + # Put a Parsons Table as a CSV using convenience method. + tbl = Table.from_csv('winning_formula.csv') + tbl.to_s3_csv('my_bucket', 'winning.csv') + + # Download a csv file and convert to a table + f = s3.get_file('my_bucket', 'my_dir/my_file.csv') + tbl = Table(f) + + # List buckets that you have access to + s3.list_buckets() + + # List the keys in a bucket + s3.list_keys('my_bucket') + +=== +API +=== + +.. autoclass :: parsons.S3 + :inherited-members: + :members: + +******** +Redshift +******** + +.. _redshift: + +======== +Overview +======== + +The Redshift class allows you to interact with an `Amazon Redshift `_ relational database. The Redshift Connector utilizes the ``psycopg2`` python package to connect to the database. + +.. note:: + S3 Credentials + Redshift only allows data to be copied to the database via S3. As such, the the :meth:`copy` and :meth:`copy_s3()` + methods require S3 credentials and write access on an S3 Bucket, which will be used for storing data en route to + Redshift. + Whitelisting + Remember to ensure that the IP address from which you are connecting has been whitelisted. + +========== +Quickstart +========== + +**Query the Database** + +.. code-block:: python + + from parsons import Redshift + rs = Redshift() + table = rs.query('select * from tmc_scratch.test_data') + +**Copy a Parsons Table to the Database** + +.. code-block:: python + + from parsons import Redshift + rs = Redshift() + table = rs.copy(tbl, 'tmc_scratch.test_table', if_exists='drop') + +All of the standard copy options can be passed as kwargs. See the :meth:`copy` method for all +options. + +======== +Core API +======== +Redshift core methods focus on input, output and querying of the database. + +.. autoclass :: parsons.Redshift + +.. autofunction:: parsons.Redshift.connection + +.. autofunction:: parsons.Redshift.query + +.. autofunction:: parsons.Redshift.query_with_connection + +.. autofunction:: parsons.Redshift.copy + +.. autofunction:: parsons.Redshift.copy_s3 + +.. autofunction:: parsons.Redshift.unload + +.. autofunction:: parsons.Redshift.upsert + +.. autofunction:: parsons.Redshift.generate_manifest + +.. autofunction:: parsons.Redshift.alter_table_column_type + +================== +Table and View API +================== +Table and view utilities are a series of helper methods, all built off of commonly +used SQL queries run against the Redshift database. + +.. autoclass :: parsons.databases.redshift.redshift.RedshiftTableUtilities + :inherited-members: + +========== +Schema API +========== +Schema utilities are a series of helper methods, all built off of commonly +used SQL queries run against the Redshift database. + +.. autoclass :: parsons.databases.redshift.redshift.RedshiftSchema + :inherited-members: + diff --git a/docs/html/_sources/civis.rst.txt b/docs/html/_sources/civis.rst.txt new file mode 100644 index 0000000000..775cf8554a --- /dev/null +++ b/docs/html/_sources/civis.rst.txt @@ -0,0 +1,43 @@ +Civis +===== + +******** +Overview +******** + +The `Civis Platform `_ is a cloud-based data science platform. +This Parsons connector utilizes the `Civis API Python client `_ +to interact with the Civis Platform. It supports executing Civis SQL queries and writing Parsons Tables to a Civis +Redshift cluster. + +.. 
note:: + Authentication + The ``CivisClient`` class requires your Redshift database ID or name, and an API Key. To obtain an API Key, log in to + Civis and follow the instructions for `Creating an API Key `_. + +********** +Quickstart +********** + +To instantiate the ``CivisClient`` class, you can either store your database identifier and API Key as +environmental variables (``CIVIS_DATABASE`` and ``CIVIS_API_KEY``) or pass them as keyword arguments. + +.. code-block:: python + from parsons import CivisClient + + # First approach: Authorize with environmental variables + civis = CivisClient() + + # Second approach: Pass API credentials as arguments + civis = CivisClient(db='my_db_name', api_key='my_api_key') + + # Execute a Civis query + civis.query(sql="SELECT * FROM my_table") + +*** +API +*** + +.. autoclass :: parsons.CivisClient + :inherited-members: + :members: \ No newline at end of file diff --git a/docs/html/_sources/facebook_ads.rst.txt b/docs/html/_sources/facebook_ads.rst.txt new file mode 100644 index 0000000000..d0e078beff --- /dev/null +++ b/docs/html/_sources/facebook_ads.rst.txt @@ -0,0 +1,63 @@ +FacebookAds +=========== + +******** +Overview +******** + +The ``FacebookAds`` class allows you to interact with parts of the Facebook Business API. +Currently the connector provides methods for creating and deleting custom audiences, and for adding users to audiences. + +The ``FacebookAds`` connector is a thin wrapper around the `FB Business SDK `_, +so some of that SDK is exposed, e.g., you may see exceptions like ``FacebookRequestError``. + +Facebook's advertising and Pages systems are massive. Check out the overviews for more information: + +* `Facebook Business Overview `_ +* `Facebook Custom Audiences `_ +* `Facebook Marketing API `_ + +.. note:: + Authentication + Before using ``FacebookAds``, you'll need the following: + * A FB application, specifically the app ID and secret. See ``_ to find your app details or create a new app. Note that a Facebook app isn't necessarily visible to anyone but you: it's just needed to interact with the Facebook API. + * A FB ad account. See ``_ to find your ad accounts or create a new one. + * A FB access token representing a user that has access to the relevant ad account. You can generate an access token from your app, either via the Facebook API itself, or via console at ``_. + +********** +Quickstart +********** + +To instantiate the FacebookAds class, you can either store your authentication credentials as environmental variables +(``FB_APP_ID``, ``FB_APP_SECRET``, ``FB_ACCESS_TOKEN``, and ``FB_AD_ACCOUNT_ID``) or pass them in as arguments: + +.. code-block:: python + + from parsons import FacebookAds + + # First approach: Use environmental variables + fb = FacebookAds() + + # Second approach: Pass credentials as argument + fb = FacebookAds(app_id='my_app_id', + app_secret='my_app_secret', + access_token='my_access_token', + ad_account_id='my_account_id') + +You can then use various methods: + +.. code-block:: python + + # Create audience + fb.create_custom_audience(name='audience_name', data_source='USER_PROVIDED_ONLY') + + # Delete audience + fb.delete_custom_audience(audience_id='123') + +*** +API +*** + +.. 
autoclass :: parsons.FacebookAds + :inherited-members: + :members: \ No newline at end of file diff --git a/docs/html/_sources/google_civic.rst.txt b/docs/html/_sources/google_civic.rst.txt new file mode 100644 index 0000000000..bb2aeb7871 --- /dev/null +++ b/docs/html/_sources/google_civic.rst.txt @@ -0,0 +1,6 @@ +Google Civic +============ + + +.. autoclass :: parsons.GoogleCivic + :inherited-members: \ No newline at end of file diff --git a/docs/html/_sources/google_sheets.rst.txt b/docs/html/_sources/google_sheets.rst.txt new file mode 100644 index 0000000000..9aa9369bb3 --- /dev/null +++ b/docs/html/_sources/google_sheets.rst.txt @@ -0,0 +1,11 @@ +GoogleSheets +============= + +The GoogleSheets class allows you to interact with a Google Drive spreadsheet. + +In order to instantiate the class, you must pass Google Drive credentials as a dictionary, or store the credentials as a JSON string in the ``GOOGLE_DRIVE_CREDENTIALS`` environment variable. Typically you'll get the credentials from the Google Developer Console (look for the "Google Drive API"). + + +.. autoclass :: parsons.GoogleSheets + :inherited-members: + :members: \ No newline at end of file diff --git a/docs/html/_sources/index.rst.txt b/docs/html/_sources/index.rst.txt new file mode 100644 index 0000000000..9b107c8d1c --- /dev/null +++ b/docs/html/_sources/index.rst.txt @@ -0,0 +1,224 @@ +.. Parsons documentation master file, created by + sphinx-quickstart on Sat Sep 8 14:41:56 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +About +===== + +Parsons, named after `Lucy Parsons `_, is a Python package that contains a growing list of connectors and integrations to move data between various tools. Parsons is focused on integrations and connectors for tools utilized by the progressive community. + +Parsons was built out of a belief that progressive organizations spend far too much time building the same integrations, over and over and over again, while they should be engaged in more important and impactful work. It +was built and is maintained by The Movement Cooperative. + +The Movement Cooperative +======================== +The Movement Cooperative is a member led organization focused on providing data, tools and strategic support for the progressive community. Our mission is to break down technological barriers for organizations that fight for social justice. + +License and Usage +================= +Usage of Parsons is governed by the `TMC Parsons License `_, which allows for unlimited non-commercial usage, provided that individuals and organizations adhere to our broad values statement. + +Design Goals +============ +The goal of Parsons is to make the movement of data between systems as easy and straightforward as possible. Simply put, we seek to reduce the lines of code that are written by the progressive community. Not only is this a waste of time, but we rarely have the capacity and resources to fully unittest our scripts. + +.. image:: /_static/parsons_diagram.png + +Parsons seeks to be flexible from a data ingestion and output perspective, while providing ETL tools that recognize that our data is **always** messy. Central to this concept is the :ref:`parsons-table` the table-like object that most methods return. + +QuickStart +========== + + +.. 
code-block:: python + + # VAN - Download activist codes to a CSV + + from parsons import VAN + van = VAN(db='MyVoters') + ac = van.get_activist_codes() + ac.to_csv('my_activist_codes.csv') + + # Redshift - Create a table from a CSV + + from parsons import Table + tbl = Table.from_csv('my_table.csv') + tbl.to_redshift('my_schema.my_table') + + # Redshift - Export from a query to CSV + + from parsons import Redshift + sql = 'select * from my_schema.my_table' + rs = Redshift() + tbl = rs.query(sql) + tbl.to_csv('my_table.csv') + + # Upload a file to S3 + + from parsons import S3 + s3 = S3() + s3.put_file('my_bucket','my_table.csv') + + # TargetSmart - Append data to a record + + from parsons import TargetSmart + ts = TargetSmart(api_key='MY_KEY') + record = ts.data_enhance(231231231, state='DC') + +Sources +======= +* Documentation: ``_ +* Source Code: ``_ + +Installation +============ +You can install the most recent release by running: ``pip install parsons`` + +Logging +======= +Parsons uses the `native python logging system `_. By default, log output will go to the console and look like: + +.. code-block:: none + + parsons.modulename LOGLEVEL the specific log message + +In your scripts that use Parsons, if you want to override the default Parsons logging behavior, just grab the "parsons" logger and tweak it: + +.. code-block:: python + + import logging + parsons_logger = logging.getLogger('parsons') + # parsons_logger.setLevel('DEBUG') + # parsons_logger.addHandler(...) + # parsons_logger.setFormatter(...) + +Minimizing Resource Utilization +=============================== + +A primary goal of Parsons is to make installing and using the library as easy as possible. Many +of the patterns and examples that we document are meant to show how easy it can be to use Parsons, +but sometimes these patterns trade accessibility for performance. + +In environments where efficiency is important, we recommend users take the following steps to +minimize resource utilization: + + 1. Don't import classes from the root Parsons package + 2. Install only the dependencies you need + +*** Don't import from the root Parsons package + +Throughout the Parsons documentation, users are encouraged to load Parsons classes like so: + +```python +from parsons import Table +``` + +In order to support this pattern, Parsons imports all of its classes into the root `parsons` +package. Due to how Python loads modules and packages, importing even one Parsons class results +in ALL of them being loaded. In order to avoid the resource consumption associated with loading all +of Parsons, we have created a mechanism to skip loading of call of the Parsons classes. + +If you set `PARSONS_SKIP_IMPORT_ALL` in your environment, Parsons will not import all of its classes +into the root `parsons` package. Setting this environment variable means you will **NOT** be able to +import using the `from parsons import X` pattern. Instead, you will need to import directly from the +package where a class is defined (e.g. `from parsons.etl import Table`). + +If you use the `PARSONS_SKIP_IMPORT_ALL` and import directly from the appropriate sub-package, +you will only load the classes that you need and will not consume extra resources. Using this +method, you may see as much as an 8x decrease in memory usage for Parsons. + +*** Install only the dependencies you need + +Since Parsons needs to talk to so many different API's, it has a number of dependencies on other +Python libraries. 
It may be preferable to only install those external dependencies that you will +use. + +For example, if you are running on Google Cloud, you might not need to use any of Parsons' AWS +connectors. If you don't use any of Parsons' AWS connectors, then you won't need to install the +Amazon Boto3 library that Parsons uses to access the Amazon APIs. + +By default, installing Parsons will install all of its external dependencies. You can prevent +these dependencies from being installed with Parsons by passing the `--no-deps` flag to pip +when you install Parsons. + +``` +> pip install --no-deps parsons +``` + +Once you have Parsons installed without these external dependencies, you can then install +the libraries as you need them. You can use the requirements.txt as a reference to figure +out which version you need. At a minimum you will need to install the following libraries +for Parsons to work at all: + +* petl + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + +.. toctree:: + :maxdepth: 1 + :caption: Integrations + :name: integrations + + action_kit + action_network + airtable + aws + azure + bill_com + bloomerang + box + braintree + civis + copper + crowdtangle + databases + facebook_ads + freshdesk + github + google + hustle + mailchimp + mobilize_america + newmode + ngpvan + pdi + p2a + redash + rockthevote + salesforce + sftp + targetsmart + turbovote + twilio + zoom + +.. toctree:: + :maxdepth: 1 + :caption: Enhancements + :name: enhancements + + census_geocoder + +.. toctree:: + :maxdepth: 1 + :caption: Framework + :name: framework + + dbsync + table + notifications + utilities + +.. toctree:: + :maxdepth: 1 + :caption: Contributor Documentation + :name: contrib_docs + + contributing + build_a_connector diff --git a/docs/html/_sources/logging.rst.txt b/docs/html/_sources/logging.rst.txt new file mode 100644 index 0000000000..52ce6bd387 --- /dev/null +++ b/docs/html/_sources/logging.rst.txt @@ -0,0 +1,113 @@ + + +Logger +======= + +The Logger class is a wrapper to python's ``logging`` module. It is an attempt +to standardize and simplify logging in Parsons. All Parson submodules will have +logging implemented. + +.. note:: + By default, logging is not set and therefore no log messages are output. See + Quickstart_ below for option to turn it. + on. + + +.. _Quickstart: + +********** +Quickstart +********** + +To view the standard log outputs of any Parsons module, pass in ``log=True``. + +.. code-block:: python + + from parsons import VAN + + van = VAN(db='MyVoters', log=True) # Use the default log configuration + + van.events() + + >>> Getting events... + >>> Found 10 events. + +Additionally, the Logger class is available for use in any arbitrary script. +See `Default Configurations`_ for more details. + +.. code-block:: python + + from parsons.logging.plogger import Logger + + logger = Logger(__name__) + + logger.info('This is an info log') + + # By default debug is not printed to sys.stdout. + logger.debug('This is a debug log') + + >>> This is an info log + +********************** +Configuring the Logger +********************** + +Currently, only three handlers are natively supported, with the hope to add more. +The handlers that are supported are: +- StreamHandler +- FileHandler +- SlackHandler + +Most configurations option that can be done in the ``logging`` should be +supported by the Logger class. + +.. 
code-block:: python + + logger = Logger(__name__, handlers=[]) + + logger.add_file_handler('name_of_log_file.log', level='DEBUG') + + logger.add_stream_handler(sys.stdout, level-'INFO') + + logger.add_slack_handler( + os.environ['SLACK_URLS_PASSWORD'], + 'db_test_chan', + level='ERROR') + +To set the format, you can either pass in a Formatter object or a string format. + +.. code-block:: python + + logger.add_file_handler( + 'name_of_log_file.log', + level='DEBUG', + sformat='%(asctime)s:%(name)s:%(levelname)s:%(message)s') + + strm_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s') + logger.add_stream_handler(sys.stdout, level-'INFO', formatter=strm_format) + +.. _`Default Configurations`: + +********************* +Default Configuration +********************* + +The default logging configuration is: + +- ``StreamHandler`` + + - level: ``'INFO'`` + - format: ``'%(message)s'`` + - stream: ``sys.out`` + +- ``FileHandler`` + + - level: ``'DEBUG'`` + - format: ``'%(asctime)s:%(name)s:%(levelname)s:%(message)s'`` + - file: ``__name__.log`` + +****** +Logger +****** +.. autoclass:: parsons.logging.plogger.Logger + :inherited-members: diff --git a/docs/html/_sources/mobile_commons.rst.txt b/docs/html/_sources/mobile_commons.rst.txt new file mode 100644 index 0000000000..2f0c015740 --- /dev/null +++ b/docs/html/_sources/mobile_commons.rst.txt @@ -0,0 +1,22 @@ +Mobile Commons +============== + +The MobileCommons class leverages the API of `Upland Mobile `_ (nee Mobile Commons). + +********* +Campaigns +********* +.. autoclass:: parsons.mobile_commons.mobile_commons.Campaigns + :inherited-members: + +****** +Groups +****** +.. autoclass:: parsons.mobile_commons.mobile_commons.Groups + :inherited-members: + +******** +Profiles +******** +.. autoclass:: parsons.mobile_commons.mobile_commons.Profiles + :inherited-members: diff --git a/docs/html/_sources/mobilize_america.rst.txt b/docs/html/_sources/mobilize_america.rst.txt new file mode 100644 index 0000000000..0f6b8ebabc --- /dev/null +++ b/docs/html/_sources/mobilize_america.rst.txt @@ -0,0 +1,49 @@ +Mobilize America +================ + +******** +Overview +******** + +`Mobilize America `_ is an activist signup tool used by progressive organizations. +This class provides several methods for fetching organizations, people, and events from their +`API `_, which is currently in alpha development. + +.. note:: + Authentication + Some methods in the ``MobilizeAmerica`` class require an API Key furnished by Mobilize America (private methods), + while others do not (public methods). Each method in this class contains a note indicating whether it is public + or private. For more information, see the `API documentation `_. + +********** +Quickstart +********** + +If you instantiate ``MobilizeAmerica`` without an API Key, you can only use public methods: + +.. code-block:: python + from parsons import MobilizeAmerica + + # Instantiate class without API key + ma = MobilizeAmerica() + + # Use public method to get all organizations + ma.get_organizations() + + +In order to use private methods, you must provide an API key either by setting the environmental +variable ``MOBILIZE_AMERICA_API_KEY`` or by passing an ``api_key`` argument as shown below: + +.. code-block:: python + # Instantiate class without API key as argument + ma = MobilizeAmerica(api_key='my_api_key') + + # Use private method to get all people + ma.get_people() + +*** +API +*** + +.. 
autoclass :: parsons.MobilizeAmerica + :inherited-members: \ No newline at end of file diff --git a/docs/html/_sources/ngpvan.rst.txt b/docs/html/_sources/ngpvan.rst.txt new file mode 100644 index 0000000000..826ecbf2e9 --- /dev/null +++ b/docs/html/_sources/ngpvan.rst.txt @@ -0,0 +1,372 @@ +NGPVAN +====== + + +******** +Overview +******** + +The VAN module leverages the VAN API and generally follows the naming convention of their API endpoints. It +is recommended that you reference their `API documentation `_ to +additional details and information. + +.. note:: + API Keys + - API Keys are specific to each committee and state. + - There is a Parsons type API Key that can be requested via the Integrations menu on the main page. + If you have an issue gaining access to this key, or an admin has questions, please email + . + + +.. warning:: + VANIDs + VANIDs are unique to each state and instance of the VAN. VANIDs used for the AV VAN **will not** match + those of the SmartVAN or VoteBuilder. + Maintenance & Suppoort + VAN/EveryAction is not responsible for support of Parsons. Their support team cannot answer questions + about Parsons. Please direct any questions + +.. toctree:: + :maxdepth: 1 + +********** +QuickStart +********** + +To call the VAN class you can either store the api key as an environmental variable VAN_API_KEY or pass it in as an argument.. + +.. code-block:: python + + from parsons import VAN + + van = VAN(db='MyVoters') # Specify the DB type and pass api key via environmental variable. + + van = VAN(api_key='asdfa-sdfadsf-adsfasdf',db='MyVoters') # Pass api key directly + +You can then call various endpoints: + +.. code-block:: python + + from parsons import VAN + + van = VAN(db='MyVoters') + + # List events with a date filter + events = van.get_events(starting_before='2018-02-01') + + # List all folders shared with API Key User + folders = van.get_folders() + + # Return to a Redshift database + saved_lists = van.get_saved_lists().to_redshift('van.my_saved_lists') + +This a is just a small sampling of all of the VAN endpoints that you can leverage. We recommend reviewing the +documentation for all functions. + +**************** +Common Workflows +**************** + +=========== +Bulk Import +=========== +For some methods, VAN allows you to bulk import multiple records to create or modify them. + +The bulk upload endpoint, requires access to file on the public internet as it runs the upload +asynchronously. Therefore, in order to bulk import, you must pass in cloud storage credentials +so that the file can be posted. Currently, only S3 is supported. + +**Bulk Apply Activist Codes** + +.. code-block:: python + + from parsons import VAN, Table + + van = VAN(db=EveryAction) + + # Load a table containing the VANID, activistcodeid and other options. + tbl = Table.from_csv('new_volunteers.csv') + + # Table will be sent to S3 bucket and a POST request will be made to VAN creating + # the bulk import job with all of the valid meta information. The method will + # return the job id. + job_id = van.bulk_apply_activist_codes(tbl, url_type="S3", bucket='my_bucket') + + # The bulk import job is run asynchronously, so you may poll the status of a job. + job_status = van.get_bulk_import_job(job_id) + +============================ +Scores: Loading and Updating +============================ + +Loading a score is a multi-step process. Once a score is set to approved, loading takes place overnight. + +**Standard Auto Approve Load** + +.. 
code-block:: python + + from parsons import VAN, Table + + van = VAN(db='MyVoters') # API key stored as an environmental variable + + # If you don't know the id, you can run van.get_scores() to list the + # slots that are available and their associated score ids. + score_id = 9999 + + # Load the Parsons table with the scores. The first column of the table + # must be the person id (e.g. VANID). You could create this from Redshift or + # another source. + tbl = Table.from_csv('winning_scores.csv') + + # Specify the score id slot and the column name for each score. + config = [{'score_id': score_id, 'score_column': 'winning_model'}] + + # If you have multiple models in the same file, you can load them all at the same time. + # In fact, VAN recommends that you do so to reduce their server loads. + config = [{'score_id': 5555, 'score_column': 'score1'}, {'score_id': 5556, 'score_column': 'score2'}] + + # The score file must posted to the internet. This configuration uses S3 to do so. In this + # example, your S3 keys are stored as environmental variables. If not, you can pass them + # as arguments. + job_id = van.upload_scores(tbl, config, url_type='S3', email='info@tmc.org', bucket='tmc-fake') + +**Standard Load Requiring Approval** + +.. code-block:: python + + from parsons import VAN + + van = VAN(db='MyVoters') # API key stored as an environmental variable + config = [{'score_id': 3421, 'score_column': 'winning_model'}] + + # Note that auto_approve is set to False. This means that you need to manually approve + # the job once it is loaded. + job_id = van.upload_scores(tbl, config, url_type='S3', email='info@tmc.org', + bucket='tmc-fake', auto_approve=False) + + # Approve the job + van.update_score_status(job_id,'approved') + +=========================== +People: Add Survey Response +=========================== +The following workflow can be used to apply survey questions, activist codes +and canvass responses. + +.. code-block:: python + + from parsons import VAN + + # Instantiate Class + van = VAN(db="MyVoters") + + van_id = 13242 + sq = 311838 # Valid survey question id + sr = 1288926 # Valid survey response id + ct = 36 # Valid contact type id + it = 4 # Valid input type id + + # Create a valid survey question response + van.apply_survey_response(vanid, sq, sr, contact_type_id=ct, input_type_id=it) + +============================= +Event: Creating and Modifying +============================= + +Events are made up of sub objects that need to exist to create an event + +* Event Object - The event itself +* Event Type - The type of event, such as a `Canvass` or `Phone Bank`. These are created + in the VAN UI and can be reused for multiple events. +* Locations - An event can have multiple locations. While not required to initially create an + event, these are required to add signups to an event. +* Roles - The various roles that a person can have at an event, such as ``Lead`` or + ``Canvasser``. These are set as part of the event type. +* Shifts - Each event can have multiple shits in which a person can be assigned. These are + specified in the event creation. + +.. 
code-block:: python
+
+  from parsons import VAN
+
+  # Instantiate class
+  van = VAN(db="EveryAction")
+
+  # Create A Location
+  loc_id = van.location(name='Big `Ol Canvass', address='100 W Washington', city='Chicago', state='IL')
+
+  # Create Event
+  name = 'GOTV Canvass' # Name of event
+  short_name = 'GOTVCan' # Short name of event, 12 chars or less
+  start_time = '2018-11-01T15:00:00' # ISO formatted date
+  end_time = '2018-11-01T18:00:00' # ISO formatted date after start time
+  event_type_id = 296199 # A valid event type id
+  roles = [259236] # A list of valid role ids
+  location_ids = [loc_id] # An optional list of locations ids for the event
+  description = 'CPD Super Volunteers Canvass' # Optional description of 200 chars or less
+  shifts = [{'name': 'Shift 1',
+             'start_time': '2018-11-01T15:00:00',
+             'end_time': '2018-11-01T17:00:00'}] # Shifts must fall within event start/end time.
+
+  new_event = van.event_create(name, short_name, start_time, end_time, event_type_id, roles,
+                               location_ids=location_ids, shifts=shifts, description=description)
+
+
+============================
+Signup: Adding and Modifying
+============================
+
+.. code-block:: python
+
+  from parsons import VAN
+
+  # Instantiate class
+  van = VAN(db="EveryAction")
+
+  # Create a new signup
+
+  vanid = 100349920
+  event_id = 750001004
+  shift_id = 19076
+  role_id = 263920
+  location_id = 3
+  status_id = 11
+
+  # Create the signup. Will return a signup id
+  signup_id = van.signup_create(vanid, event_id, shift_id, role_id, status_id, location_id)
+
+  # Modify a status of the signup
+  new_status_id = 6
+  van.signup_update(signup_id, status_id=new_status_id)
+
+***
+API
+***
+
+======
+People
+======
+.. autoclass:: parsons.ngpvan.van.People
+   :inherited-members:
+
+==============
+Activist Codes
+==============
+.. autoclass:: parsons.ngpvan.van.ActivistCodes
+   :inherited-members:
+
+===========
+Bulk Import
+===========
+.. autoclass:: parsons.ngpvan.van.BulkImport
+   :inherited-members:
+
+=================
+Canvass Responses
+=================
+.. autoclass:: parsons.ngpvan.van.CanvassResponses
+   :inherited-members:
+
+================
+Changed Entities
+================
+.. autoclass:: parsons.ngpvan.van.ChangedEntities
+   :inherited-members:
+
+=====
+Codes
+=====
+.. autoclass:: parsons.ngpvan.van.Codes
+   :inherited-members:
+
+=============
+Custom Fields
+=============
+.. autoclass:: parsons.ngpvan.van.CustomFields
+   :inherited-members:
+
+======
+Events
+======
+.. autoclass:: parsons.ngpvan.van.Events
+   :inherited-members:
+
+===========
+Export Jobs
+===========
+.. autoclass:: parsons.ngpvan.van.ExportJobs
+   :inherited-members:
+
+=================
+File Loading Jobs
+=================
+.. autoclass:: parsons.ngpvan.van.FileLoadingJobs
+   :inherited-members:
+
+=======
+Folders
+=======
+.. note::
+   A folder must be shared with the user associated with your API key to
+   be listed.
+
+.. autoclass:: parsons.ngpvan.van.Folders
+   :inherited-members:
+
+=========
+Locations
+=========
+.. autoclass:: parsons.ngpvan.van.Locations
+   :inherited-members:
+
+===========
+Saved Lists
+===========
+.. note::
+   A saved list must be shared with the user associated with your API key to
+   be listed.
+
+.. autoclass:: parsons.ngpvan.van.SavedLists
+   :inherited-members:
+
+======
+Scores
+======
+Prior to loading a score for the first time, you must contact VAN support to request
+a score slot.
+
+..
note::
+   Score Auto Approval
+   Scores can be automatically set to ``approved`` through the :meth:`VAN.upload_scores`
+   method, allowing you to skip calling :meth:`VAN.update_score_status`, provided the average of
+   the scores is within the fault tolerance specified by the user. It is only available
+   to API keys with permission to automatically approve scores.
+
+
+.. autoclass:: parsons.ngpvan.van.Scores
+   :inherited-members:
+
+=======
+Signups
+=======
+.. autoclass:: parsons.ngpvan.van.Signups
+   :inherited-members:
+
+================
+Supporter Groups
+================
+.. autoclass:: parsons.ngpvan.van.SupporterGroups
+   :inherited-members:
+
+================
+Survey Questions
+================
+.. autoclass:: parsons.ngpvan.van.SurveyQuestions
+   :inherited-members:
+
+=======
+Targets
+=======
+.. autoclass:: parsons.ngpvan.van.Targets
+   :inherited-members:
diff --git a/docs/html/_sources/notifications.rst.txt b/docs/html/_sources/notifications.rst.txt
new file mode 100644
index 0000000000..f9a98402ee
--- /dev/null
+++ b/docs/html/_sources/notifications.rst.txt
@@ -0,0 +1,145 @@
+Notifications
+=============
+
+
+==========
+Slack
+==========
+
+
+********
+Overview
+********
+
+The Slack module leverages the Slack API and provides a way to easily send
+notifications through Slack. It is recommended that you reference the
+`Slack API documentation `_ for additional details and
+information.
+
+.. note::
+   API Tokens
+   - Slack API Tokens are required to use this module. To obtain an API
+     Token, `create a Slack App `_ associated
+     with the desired Slack workspace. Once you create the app, navigate
+     to 'OAuth & Permissions' and add the following OAuth scopes:
+
+     `channels:read`, `users:read`, `chat:write`, and `files:write`
+
+     You can now install the Slack App, which will produce an API Token.
+     Note that you can change the scopes any time, but you must reinstall
+     the app each time (your API Token will stay the same).
+   - Slack has rate limits on all its endpoints.
+
+.. toctree::
+   :maxdepth: 1
+
+**********
+QuickStart
+**********
+
+To call the Slack class you can either store the API Token as an environment
+variable `SLACK_API_TOKEN` or pass it in as an argument.
+
+.. code-block:: python
+
+   from parsons import Slack
+
+   slack = Slack() # Initiate class via environment variable api token
+
+   slack = Slack(api_key='my-api-tkn') # Pass api token directly
+
+You can then send messages:
+
+.. code-block:: python
+
+   from parsons import Slack
+
+   slack = Slack()
+
+   # send a simple message
+   slack.message_channel("my_channel", "Hello from python script")
+
+   # share a file
+   slack.upload_file(["channel_1", "channel_2"], "my_slack_file.txt")
+
+***
+API
+***
+.. autoclass:: parsons.Slack
+   :inherited-members:
+
+
+==========
+Gmail
+==========
+
+
+********
+Overview
+********
+
+The Gmail module leverages the Gmail API and provides a way to easily send
+notifications through email. It is recommended that you reference the
+`Gmail API documentation `_ for
+additional details and information.
+
+.. note::
+   Credentials and token
+   - Credentials are required to use the class
+   - You will need to pass in the path to the credentials and to where a
+     generated token will be saved. Typically you’ll get the credentials from
+     the Google Developer Console (look for the “Gmail API”).
+
+.. note::
+   6MB Attachment Size Limit
+   - Currently there is a limit of 6MB when sending attachments.
+
+..
toctree::
+   :maxdepth: 1
+
+**********
+QuickStart
+**********
+
+To call the Gmail class you will need to pass in the path to a
+`credentials.json` and the path to `token.json`.
+
+.. code-block:: python
+
+    from parsons import Gmail
+
+    gmail = Gmail(
+        creds_path="~/secret_location/credentials.json",
+        token_path="~/secret_location/token.json")
+
+The easiest way to send a message:
+
+.. code-block:: python
+
+    gmail.send_email(
+        "sender@email.com",
+        "recipient@email.com",
+        "The Subject",
+        "This is the text body of the email")
+
+The current version also supports sending html emails and emails with
+attachments.
+
+.. code-block:: python
+
+    gmail.send_email(
+        "sender@email.com",
+        "recipient@email.com",
+        "An html email with attachments",
+        "This is the text body of the email",
+        html="<p>This is the html part of the email</p>",
+        files=['file1.txt', 'file2.txt'])
+
+Additionally, you can create a raw email message and send it. See below for
+more details.
+
+***
+API
+***
+.. autoclass:: parsons.Gmail
+   :inherited-members:
diff --git a/docs/capitolcanary.rst b/docs/html/_sources/p2a.rst.txt
similarity index 54%
rename from docs/capitolcanary.rst
rename to docs/html/_sources/p2a.rst.txt
index 0de94721ab..b1f8dd7d00 100644
--- a/docs/capitolcanary.rst
+++ b/docs/html/_sources/p2a.rst.txt
@@ -1,30 +1,30 @@
-CapitolCanary
-=============
+Phone2Action
+============
 
 ********
 Overview
 ********
 
-`CapitolCanary `_ is a digital advocacy tool used by progressive organizations. This class
+`Phone2Action `_ is a digital advocacy tool used by progressive organizations. This class
 allows you to interact with the tool by leveraging their `API `_.
 
 .. note::
   Authentication
-  You will need to email CapitolCanary to request credentials to access the API. The credentials consist of an app ID and an app key.
+  You will need to email Phone2Action to request credentials to access the API. The credentials consist of an app ID and an app key.
 
 ***********
 Quick Start
 ***********
 
-To instantiate the ``CapitolCanary`` class, you can either pass in the app ID and app key as arguments or set the
-``CAPITOLCANARY_APP_ID`` and ``CAPITOLCANARY_APP_KEY`` environmental variables.
+To instantiate the ``Phone2Action`` class, you can either pass in the app ID and app key as arguments or set the
+``PHONE2ACTION_APP_ID`` and ``PHONE2ACTION_APP_KEY`` environmental variables.
 
 .. code-block:: python
 
-    from parsons import CapitolCanary
+    from parsons import Phone2Action
 
     # Instantiate the class using environment variables
-    cc = CapitolCanary()
+    p2a = Phone2Action()
 
     # Get all advocates updated in the last day
     import datetime
@@ -33,18 +33,18 @@ To instantiate the ``CapitolCanary`` class, you can either pass in the app ID an
 
     # get_advocates returns a dictionary that maps the advocate data (e.g. phones) to a parsons
     # Table with the data for each advocate
-    advocates_data = cc.get_advocates(updated_since=yesterday)
+    advocates_data = p2a.get_advocates(updated_since=yesterday)
 
     # For all of our advocates' phone numbers, opt them into SMS
     for phone in advocates_data['phones']:
         phone_number = phone['phones_address']
 
         # Only update phone numbers that aren't already subscribed
         if not phone['subscribed']:
-            cc.update_advocate(phone['advocate_id'], phone=phone_number, sms_opt_in=True)
+            p2a.update_advocate(phone['advocate_id'], phone=phone_number, sms_opt_in=True)
 
 ***
 API
 ***
 
-.. autoclass :: parsons.CapitolCanary
+.. autoclass :: parsons.Phone2Action
    :inherited-members:
diff --git a/docs/html/_sources/redshift.rst.txt b/docs/html/_sources/redshift.rst.txt
new file mode 100644
index 0000000000..d909e15899
--- /dev/null
+++ b/docs/html/_sources/redshift.rst.txt
@@ -0,0 +1,57 @@
+Redshift
+========
+
+********
+Overview
+********
+
+The Redshift class allows you to interact with an `Amazon Redshift `_ relational database. The Redshift Connector utilizes the ``psycopg2`` python package to connect to the database.
+
+In order to instantiate the class you must pass valid kwargs or store the following
+environmental variables:
+
+* ``'REDSHIFT_USERNAME'``
+* ``'REDSHIFT_PASSWORD'``
+* ``'REDSHIFT_HOST'``
+* ``'REDSHIFT_DB'``
+* ``'REDSHIFT_PORT'``
+
+
+.. note::
+   S3 Credentials
+      Redshift only allows data to be copied to the database via S3. As such, you need to include AWS
+      S3 credentials in your copy methods or, better yet, store them as environmental variables.
+      In addition, you'll need to provide the env var `S3_TEMP_BUCKET`, which is the bucket name used
+      for storing data en route to Redshift.
+   Whitelisting
+      Remember to ensure that the IP address from which you are connecting has been whitelisted.
+
+**********
+Quickstart
+**********
+
+**Query the Database**
+
+.. code-block:: python
+
+  from parsons import Redshift
+  rs = Redshift()
+  table = rs.query('select * from tmc_scratch.test_data')
+
+**Copy a Parsons Table to the Database**
+
+.. code-block:: python
+
+  from parsons import Redshift
+  rs = Redshift()
+  rs.copy(tbl, 'tmc_scratch.test_table', if_exists='replace')
+
+All of the standard copy options can be passed as kwargs. See the :meth:`copy` method for all
+options.
+
+**************
+Redshift Class
+**************
+
+.. autoclass :: parsons.Redshift
+   :inherited-members:
\ No newline at end of file
diff --git a/docs/html/_sources/s3.rst.txt b/docs/html/_sources/s3.rst.txt
new file mode 100644
index 0000000000..3c66c54f54
--- /dev/null
+++ b/docs/html/_sources/s3.rst.txt
@@ -0,0 +1,15 @@
+S3
+===
+
+The S3 class is heavily reliant on the ``boto3`` python package. It includes a suite of methods that are commonly
+used with S3.
+
+In order to instantiate the class you must pass valid kwargs, or have an AWS config file stored locally, or store the following
+environmental variables:
+
+* ``'AWS_ACCESS_KEY_ID'``
+* ``'AWS_SECRET_ACCESS_KEY'``
+
+.. autoclass :: parsons.S3
+   :inherited-members:
+   :members:
\ No newline at end of file
diff --git a/docs/html/_sources/sftp.rst.txt b/docs/html/_sources/sftp.rst.txt
new file mode 100644
index 0000000000..515e777ec5
--- /dev/null
+++ b/docs/html/_sources/sftp.rst.txt
@@ -0,0 +1,11 @@
+SFTP
+====
+
+The SFTP class allows you to interact with SFTP services.
+
+It uses the `Paramiko SFTP library `_ under the hood.
+
+
+.. autoclass :: parsons.SFTP
+   :inherited-members:
+   :members:
diff --git a/docs/html/_sources/table.rst.txt b/docs/html/_sources/table.rst.txt
new file mode 100644
index 0000000000..c1a083dbd4
--- /dev/null
+++ b/docs/html/_sources/table.rst.txt
@@ -0,0 +1,313 @@
+.. _parsons-table:
+
+Parsons Table
+=============
+
+
+********
+Overview
+********
+
+Most methods and functions in Parsons return a ``Table``, which is a 2D list-like object similar to a Pandas Dataframe. You can call the following methods on the Table object to output it into a variety of formats or storage types. A full list of ``Table`` methods can be found in the API section.
+
+===================
+From Parsons Table
+===================
+
+..
list-table::
+    :widths: 25 25 50
+    :header-rows: 1
+
+    * - Method
+      - Destination Type
+      - Description
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_csv`
+      - CSV File
+      - Write a table to a local csv file
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_s3_csv`
+      - AWS s3 Bucket
+      - Write a table to a csv stored in S3
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_sftp_csv`
+      - SFTP Server
+      - Write a table to a csv stored on an SFTP server
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_redshift`
+      - A Redshift Database
+      - Write a table to a Redshift database
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_postgres`
+      - A Postgres Database
+      - Write a table to a Postgres database
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_civis`
+      - Civis Redshift Database
+      - Write a table to Civis platform database
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_petl`
+      - Petl table object
+      - Convert a table to a Petl table object
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_json`
+      - JSON file
+      - Write a table to a local JSON file
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_html`
+      - HTML formatted table
+      - Write a table to a local html file
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_dataframe`
+      - Pandas Dataframe [1]_
+      - Return a Pandas dataframe
+
+.. [1] Requires optional installation of Pandas package by running ``pip install pandas``.
+
+================
+To Parsons Table
+================
+
+Create a Parsons Table object using the following methods.
+
+.. list-table::
+    :widths: 25 25 50
+    :header-rows: 1
+
+    * - Method
+      - Source Type
+      - Description
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_csv`
+      - File like object, local path, url, ftp.
+      - Loads a csv object into a Table
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_json`
+      - File like object, local path, url, ftp.
+      - Loads a json object into a Table
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_columns`
+      - List object
+      - Loads lists organized as columns in Table
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_redshift`
+      - Redshift table
+      - Loads a Redshift query into a Table
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_postgres`
+      - Postgres table
+      - Loads a Postgres query into a Table
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_dataframe`
+      - Pandas Dataframe [2]_
+      - Load a Parsons table from a Pandas Dataframe
+    * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_s3_csv`
+      - S3 CSV
+      - Load a Parsons table from a csv file on S3
+
+.. [2] Requires optional installation of Pandas package by running ``pip install pandas``.
+
+You can also use the Table constructor to create a Table from a python list or petl table:
+
+.. code-block:: python
+
+    # From a list of dicts
+    tbl = Table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
+
+    # From a list of lists, the first list holding the field names
+    tbl = Table([['a', 'b'], [1, 2], [3, 4]])
+
+    # From a petl table
+    tbl = Table(petl_tbl)
+
+========================
+Parsons Table Attributes
+========================
+
+Tables have a number of convenience attributes.
+
+.. list-table::
+    :widths: 25 50
+    :header-rows: 1
+
+    * - Attribute
+      - Description
+    * - ``.num_rows``
+      - The number of rows in the table
+    * - ``.columns``
+      - A list of column names in the table
+    * - ``.data``
+      - The actual data (rows) of the table, as a list of tuples (without field names)
+    * - ``.first``
+      - The first value in the table. Use for database queries where a single value is returned.
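+
+For example, using the constructor shown above (the values in the comments are illustrative):
+
+.. code-block:: python
+
+    from parsons import Table
+
+    tbl = Table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
+
+    tbl.num_rows   # 2
+    tbl.columns    # ['a', 'b']
+    tbl.data       # the rows without the field names, e.g. [(1, 2), (3, 4)]
+    tbl.first      # 1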
+ +============================= +Parsons Table Transformations +============================= + +Parsons tables have many methods that allow you to easily transform tables. Below is a selection +of commonly used methods. The full list can be found in the API section. + +**Column Transformations** + +.. list-table:: + :widths: 40 200 + :header-rows: 1 + + * - Method + - Description + * - :py:meth:`~parsons.etl.etl.ETL.add_column` + - Add a column + * - :py:meth:`~parsons.etl.etl.ETL.remove_column` + - Remove a column + * - :py:meth:`~parsons.etl.etl.ETL.rename_column` + - Rename a column + * - :py:meth:`~parsons.etl.etl.ETL.move_column` + - Move a column within a table + * - :py:meth:`~parsons.etl.etl.ETL.cut` + - Return a table with a subset of columns + * - :py:meth:`~parsons.etl.etl.ETL.fill_column` + - Provide a fixed value to fill a column + * - :py:meth:`~parsons.etl.etl.ETL.fillna_column` + - Provide a fixed value to fill all null values in a column + * - :py:meth:`~parsons.etl.etl.ETL.get_column_types` + - Get the python type of values for a given column + * - :py:meth:`~parsons.etl.etl.ETL.convert_column` + - Transform the values of a column via arbitrary functions + * - :py:meth:`~parsons.etl.etl.ETL.coalesce_columns` + - Coalesce values from one or more source columns + * - :py:meth:`~parsons.etl.etl.ETL.map_columns` + - Standardizes column names based on multiple possible values + + +**Row Transformations** + +.. list-table:: + :widths: 25 50 + :header-rows: 1 + + * - Method + - Description + * - :py:meth:`~parsons.etl.etl.ETL.select_rows` + - Return a table of a subset of rows based on filters + * - :py:meth:`~parsons.etl.etl.ETL.stack` + - Stack a number of tables on top of one another + * - :py:meth:`~parsons.etl.etl.ETL.chunk` + - Divide tables into smaller tables based on row count + * - :py:meth:`~parsons.etl.etl.ETL.remove_null_rows` + - Removes rows with null values in specified columns + + +**Extraction and Reshaping** + +.. list-table:: + :widths: 25 50 + :header-rows: 1 + + * - Method + - Description + * - :py:meth:`~parsons.etl.etl.ETL.unpack_dict` + - Unpack dictionary values from one column to top level columns + * - :py:meth:`~parsons.etl.etl.ETL.unpack_list` + - Unpack list values from one column and add to top level columns + * - :py:meth:`~parsons.etl.etl.ETL.long_table` + - Take a column with nested data and create a new long table + * - :py:meth:`~parsons.etl.etl.ETL.unpack_nested_columns_as_rows` + - Unpack list or dict values from one column into separate rows + + +====================== +Parsons Table Indexing +====================== + +To access rows and columns of data within a Parsons table, you can index on them. To access a column +pass in the column name as a string (e.g. ``tbl['a']``) and to access a row, pass in the row index as +an integer (e.g. ``tbl[1]``). + +.. code-block:: python + + tbl = Table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) + + # Return a column as a list + tbl['a'] + >> [1, 3] + + # Return a row as a dict + tbl[1] + >> {'a': 3, 'b': 4} + +A note on indexing and iterating over a table's data: +If you need to iterate over the data, make sure to use the python iterator syntax, so any data transformations can be applied efficiently. An example: + +.. code-block:: python + + # Some data transformations + table.add_column('newcol', 'some value') + + # Efficient way to grab all the data (applying the data transformations only once) + rows_list = [row for row in table] + +.. 
warning::
+   If you must index directly into a table's data, you can do so, but note that data transformations will be applied **each time** you do so. So this code will be very inefficient on a large table...
+
+.. code-block:: python
+
+    # Inefficient way to grab all the data
+    rows_list = []
+    for i in range(0, table.num_rows):
+        # Data transformations will be applied each time through this loop!
+        rows_list.append(table[i])
+
+====
+PETL
+====
+
+The Parsons ``Table`` relies heavily on the `petl `_ Python package. You can always access the underlying petl table with ``my_parsons_table.table``, which will allow you to perform any petl-supported ETL operations.
+
+============
+Lazy Loading
+============
+
+The Parsons ``Table`` makes use of "lazy" loading and "lazy" transformations. What this means is that it tries not to load and process your data until absolutely necessary.
+
+An example:
+
+.. code-block:: python
+
+    # Specify where to load the data
+    tbl = Table.from_csv('name_data.csv')
+
+    # Specify data transformations
+    tbl.add_column('full_name', lambda row: row['first_name'] + ' ' + row['last_name'])
+    tbl.remove_column(['first_name', 'last_name'])
+
+    # Save the table elsewhere
+    # IMPORTANT - The CSV won't actually be loaded and transformed until this step,
+    # since this is the first time it's actually needed.
+    tbl.to_redshift('main.name_table')
+
+This "lazy" loading can be very convenient and performant. However, it can make issues hard to debug. E.g. if your data transformations are time-consuming, you won't actually notice that performance hit until you try to use the data, potentially much later in your code.
+
+So just be aware of this behavior.
+
+********
+Examples
+********
+
+===============
+Basic Pipelines
+===============
+
+.. code-block:: python
+
+    from parsons import S3, Table, VAN
+
+    # S3 to Civis
+    s3 = S3()
+    csv = s3.get_file('tmc-bucket', 'my_ids.csv')
+    Table.from_csv(csv).to_civis('TMC','ids.my_ids')
+
+    # VAN Activist Codes to a Dataframe
+    van = VAN(db='MyVoters')
+    van.activist_codes().to_dataframe()
+
+    # VAN Events to an s3 bucket
+    van = VAN(db='MyVoters')
+    van.events().to_s3_csv('my-van-bucket','myevents.csv')
+
+
+*************
+To & From API
+*************
+.. autoclass:: parsons.etl.tofrom.ToFrom
+   :inherited-members:
+
+******************
+Transformation API
+******************
+The following methods allow you to manipulate the Parsons table data.
+
+.. autoclass:: parsons.etl.etl.ETL
+   :inherited-members:
diff --git a/docs/html/_sources/ts.rst.txt b/docs/html/_sources/ts.rst.txt
new file mode 100644
index 0000000000..5e32c4db0a
--- /dev/null
+++ b/docs/html/_sources/ts.rst.txt
@@ -0,0 +1,39 @@
+TargetSmart
+============
+
+`TargetSmart `_ provides access to voter and consumer data for the progressive community. They provide
+extensive services for single record lookup through their API. For larger bulk matching services
+they have an automation service, which requires that data files be posted to their SFTP. Each service requires separate credentials
+to utilize, which is why there are separate classes for each.
+
+Full documentation for both services can be found at the `TargetSmart developer portal `_.
+
+.. warning::
+   Returned fields
+      The fields that are returned are controlled by the TargetSmart staff. Please contact them if you need any adjustments
+      or alterations made to the returned fields.
+
+*******
+API 2.0
+*******
+
+.. warning::
+   Endpoint Access
+      Access to endpoints is individually provisioned.
If you encounter errors accessing an endpoint, please contact
+      your TargetSmart account representative to verify that your API key has been provisioned access.
+
+.. autoclass :: parsons.TargetSmartAPI
+   :inherited-members:
+
+**********
+Automation
+**********
+
+In order to instantiate the class you must pass valid kwargs or store the following
+environmental variables:
+
+* ``'TS_SFTP_USERNAME'``
+* ``'TS_SFTP_PASSWORD'``
+
+.. autoclass :: parsons.TargetSmartAutomation
+   :inherited-members:
\ No newline at end of file
diff --git a/docs/html/_sources/turbovote.rst.txt b/docs/html/_sources/turbovote.rst.txt
new file mode 100644
index 0000000000..3147eb0892
--- /dev/null
+++ b/docs/html/_sources/turbovote.rst.txt
@@ -0,0 +1,48 @@
+TurboVote
+=========
+
+********
+Overview
+********
+
+`TurboVote `_ is an online voter registration and vote by mail
+tool. This class contains a single method which allows you to export your users
+(aka signups).
+
+.. note::
+   Authentication
+   TurboVote requires `HTTP Basic Auth `_.
+   Clients with a TurboVote account must pass their username, password, and subdomain.
+
+**********
+QuickStart
+**********
+
+To instantiate the ``TurboVote`` class, you can either store your TurboVote API
+username, password, and subdomain as environmental variables (``TURBOVOTE_USERNAME``,
+``TURBOVOTE_PASSWORD``, and ``TURBOVOTE_SUBDOMAIN``, respectively) or pass them
+in as arguments:
+
+.. code-block:: python
+
+   from parsons import TurboVote
+
+   # First approach: Pass credentials via environmental variables.
+   tv = TurboVote()
+
+   # Second approach: Pass credentials as arguments.
+   tv = TurboVote(username='me', password='pass', subdomain='myorg')
+
+You can then call the method:
+
+.. code-block:: python
+
+   # Get users
+   tv.get_users()
+
+***
+API
+***
+
+.. autoclass :: parsons.TurboVote
+   :inherited-members:
\ No newline at end of file
diff --git a/docs/html/_sources/van.rst.txt b/docs/html/_sources/van.rst.txt
new file mode 100644
index 0000000000..6ee8634d88
--- /dev/null
+++ b/docs/html/_sources/van.rst.txt
@@ -0,0 +1,314 @@
+VAN
+==========
+
+
+********
+Overview
+********
+
+The VAN module leverages the VAN API and generally follows the naming convention of their API endpoints. It
+is recommended that you reference their `API documentation `_ for
+additional details and information.
+
+.. note::
+   API Keys
+   - VAN API Keys are required to use this module.
+   - API Keys are specific to each committee and state, so you might need many.
+   - Not all API Keys are provisioned for all endpoints. You should contact VAN if you need access.
+   - VAN typically offers a cap of free API calls per day. API calls which exceed the free limit
+     incur a cost.
+
+.. warning::
+   VANIDs
+   VANIDs are unique to each state and instance of the VAN. VANIDs used for the AV VAN **will not** match
+   those of the SmartVAN.
+
+.. toctree::
+   :maxdepth: 1
+
+**********
+QuickStart
+**********
+
+To call the VAN class you can either store the api key as an environmental variable VAN_API_KEY or pass it in as an argument.
+
+.. code-block:: python
+
+   from parsons import VAN
+
+   van = VAN(db='MyVoters') # Initiate class via environmental variable api key
+
+   van = VAN(api_key='asdfa-sdfadsf-adsfasdf',db='MyVoters') # Pass api key directly
+
+You can then call various endpoints:
+
+..
code-block:: python
+
+  from parsons import VAN
+
+  van = VAN(db='MyVoters')
+
+  # List events with a date filter
+  events = van.events(starting_before='2018-02-01')
+
+  # List all folders shared with API Key User
+  folders = van.folders()
+
+  # Return a dataframe rather than a list of dicts
+  saved_lists = van.saved_lists().to_df()
+
+This is just a small sampling of all of the VAN endpoints that you can leverage. We recommend reviewing the
+documentation for all functions.
+
+****************
+Common Workflows
+****************
+
+===========================
+Score: Loading and Updating
+===========================
+
+Loading a score is a multi-step process. Once a score is set to approved, loading takes place
+overnight.
+
+.. code-block:: python
+
+  import json
+
+  from parsons import VAN
+
+  # Instantiate class
+  van = VAN(db="MyVoters")
+
+  # List all of the scores / slots
+  print(json.dumps(van.scores(), indent=4))
+
+  # Input the score slot id
+  score_slot_id = 34115
+
+  # Load the score file to VAN
+  r = van.file_load('score.csv',
+                    'https://box.com/scores.zip',
+                    ['vanid','myscore'],
+                    'vanid',
+                    'VANID',
+                    score_slot_id,
+                    'myscore',
+                    email='anemailaddress@gmail.com')
+
+
+  # Update Status - The email that you get when it is loaded will include a score update
+  # id. Pass this to approve the score to be loaded.
+  #   - Might take a few minutes to get the email
+  #   - Email will also include some nice stats to QC, including matched rows
+
+  van.score_update_status(47187,'approved') # Pass the score update id and set to approved
+
+===========================
+People: Add Survey Response
+===========================
+The following workflow can be used to apply survey questions, activist codes
+and canvass responses.
+
+.. code-block:: python
+
+  from parsons import VAN
+
+  # Instantiate class
+  van = VAN(db="MyVoters")
+
+  dwid = 999999999 # Valid DWID
+  sq_id = 311838 # Valid survey question id
+  sr_id = 1288926 # Valid survey response id
+  ct_id = 36 # Valid contact type id
+  it_id = 4 # Valid input type id
+
+  # Create a valid survey question response
+  sq_response = van.people_sq_response(sq_id, sr_id)
+  van.people_canvass_response(dwid,
+                              key_type='dwid',
+                              contact_type_id=ct_id,
+                              input_type_id=it_id,
+                              responses=sq_response)
+
+=============================
+Event: Creating and Modifying
+=============================
+
+Events are made up of sub-objects that need to exist to create an event:
+
+* Event Object - The event itself
+* Event Type - The type of event, such as a `Canvass` or `Phone Bank`. These are created
+  in the VAN UI and can be reused for multiple events.
+* Locations - An event can have multiple locations. While not required to initially create an
+  event, these are required to add signups to an event.
+* Roles - The various roles that a person can have at an event, such as ``Lead`` or
+  ``Canvasser``. These are set as part of the event type.
+* Shifts - Each event can have multiple shifts to which a person can be assigned. These are
+  specified in the event creation.
+
+..
code-block:: python
+
+  from parsons import VAN
+
+  # Instantiate class
+  van = VAN(db="EveryAction")
+
+  # Create A Location
+  loc_id = van.location(name='Big `Ol Canvass', address='100 W Washington', city='Chicago', state='IL')
+
+  # Create Event
+  name = 'GOTV Canvass' # Name of event
+  short_name = 'GOTVCan' # Short name of event, 12 chars or less
+  start_time = '2018-11-01T15:00:00' # ISO formatted date
+  end_time = '2018-11-01T18:00:00' # ISO formatted date after start time
+  event_type_id = 296199 # A valid event type id
+  roles = [259236] # A list of valid role ids
+  location_ids = [loc_id] # An optional list of locations ids for the event
+  description = 'CPD Super Volunteers Canvass' # Optional description of 200 chars or less
+  shifts = [{'name': 'Shift 1',
+             'start_time': '2018-11-01T15:00:00',
+             'end_time': '2018-11-01T17:00:00'}] # Shifts must fall within event start/end time.
+
+  new_event = van.event_create(name, short_name, start_time, end_time, event_type_id, roles,
+                               location_ids=location_ids, shifts=shifts, description=description)
+
+
+============================
+Signup: Adding and Modifying
+============================
+
+.. code-block:: python
+
+  from parsons import VAN
+
+  # Instantiate class
+  van = VAN(db="EveryAction")
+
+  # Create a new signup
+
+  vanid = 100349920
+  event_id = 750001004
+  shift_id = 19076
+  role_id = 263920
+  location_id = 3
+  status_id = 11
+
+  # Create the signup. Will return a signup id
+  signup_id = van.signup_create(vanid, event_id, shift_id, role_id, status_id, location_id)
+
+  # Modify a status of the signup
+  new_status_id = 6
+  van.signup_update(signup_id, status_id=new_status_id)
+
+
+******
+People
+******
+.. autoclass:: parsons.ngpvan.van.People
+   :inherited-members:
+
+**************
+Activist Codes
+**************
+.. autoclass:: parsons.ngpvan.van.ActivistCodes
+   :inherited-members:
+
+*****************
+Survey Questions
+*****************
+.. autoclass:: parsons.ngpvan.van.SurveyQuestions
+   :inherited-members:
+
+******
+Events
+******
+.. autoclass:: parsons.ngpvan.van.Events
+   :inherited-members:
+
+*********
+Locations
+*********
+.. autoclass:: parsons.ngpvan.van.Locations
+   :inherited-members:
+
+*******
+Signups
+*******
+.. autoclass:: parsons.ngpvan.van.Signups
+   :inherited-members:
+
+*****
+Codes
+*****
+.. autoclass:: parsons.ngpvan.van.Codes
+   :inherited-members:
+
+*****************
+Canvass Responses
+*****************
+.. autoclass:: parsons.ngpvan.van.CanvassResponses
+   :inherited-members:
+
+***********
+Saved Lists
+***********
+.. note::
+   A saved list must be shared with the user associated with your API key to
+   be listed.
+
+.. autoclass:: parsons.ngpvan.van.SavedLists
+   :inherited-members:
+
+*******
+Folders
+*******
+.. note::
+   A folder must be shared with the user associated with your API key to
+   be listed.
+
+.. autoclass:: parsons.ngpvan.van.Folders
+   :inherited-members:
+
+***********
+Export Jobs
+***********
+.. autoclass:: parsons.ngpvan.van.ExportJobs
+   :inherited-members:
+
+******
+Scores
+******
+Prior to loading a score for the first time, you must contact VAN support to request
+a score slot.
+
+.. note::
+   Score Auto Approval
+   Scores can be automatically set to ``approved`` through the ``file_load`` function, allowing
+   you to skip calling the :meth:`score_update_status` method. Scores are automatically approved
+   only if the average of the scores is within the fault tolerance specified by the user. It
+   is only available to API keys with permission to automatically approve scores.
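+
+To make the fault tolerance rule concrete, here is an illustrative, Parsons-independent sketch.
+The exact comparison VAN performs is an assumption (modeled here as the absolute difference
+between the actual and expected average), so treat this as intuition rather than the implementation:
+
+.. code-block:: python
+
+    import statistics
+
+    # Hypothetical values, for illustration only.
+    scores = [0.61, 0.55, 0.72, 0.49]
+    expected_average = 0.60  # the average you told VAN to expect
+    fault_tolerance = 0.05   # maximum acceptable deviation
+
+    # Assumed rule: the load is eligible for auto-approval only if the
+    # actual average is within the fault tolerance of the expected average.
+    eligible = abs(statistics.mean(scores) - expected_average) <= fault_tolerance
+    print(eligible)  # True here, since |0.5925 - 0.60| = 0.0075 <= 0.05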
+ + +.. autoclass:: parsons.ngpvan.van.Scores + :inherited-members: + +************* +Score Updates +************* +.. autoclass:: parsons.ngpvan.van.ScoreUpdates + :inherited-members: + +******* +Targets +******* +.. autoclass:: parsons.ngpvan.van.Targets + :inherited-members: + +***************** +File Loading Jobs +***************** +.. autoclass:: parsons.ngpvan.van.FileLoadingJobs + :inherited-members: + + + + diff --git a/docs/html/_static/ajax-loader.gif b/docs/html/_static/ajax-loader.gif new file mode 100644 index 0000000000..61faf8cab2 Binary files /dev/null and b/docs/html/_static/ajax-loader.gif differ diff --git a/docs/html/_static/basic.css b/docs/html/_static/basic.css new file mode 100644 index 0000000000..104f076ae8 --- /dev/null +++ b/docs/html/_static/basic.css @@ -0,0 +1,676 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { 
+ text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + 
border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist td { + vertical-align: top; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: 
transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/html/_static/comment-bright.png b/docs/html/_static/comment-bright.png new file mode 100644 index 0000000000..15e27edb12 Binary files /dev/null and b/docs/html/_static/comment-bright.png differ diff --git a/docs/html/_static/comment-close.png b/docs/html/_static/comment-close.png new file mode 100644 index 0000000000..4d91bcf57d Binary files /dev/null and b/docs/html/_static/comment-close.png differ diff --git a/docs/html/_static/comment.png b/docs/html/_static/comment.png new file mode 100644 index 0000000000..dfbc0cbd51 Binary files /dev/null and b/docs/html/_static/comment.png differ diff --git a/docs/html/_static/css/badge_only.css b/docs/html/_static/css/badge_only.css new file mode 100644 index 0000000000..323730ae29 --- /dev/null +++ b/docs/html/_static/css/badge_only.css @@ -0,0 +1 @@ +.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions 
.rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} diff --git a/docs/html/_static/css/theme.css b/docs/html/_static/css/theme.css new file mode 100644 index 0000000000..b19dbfe59a --- /dev/null +++ b/docs/html/_static/css/theme.css @@ -0,0 +1,6 @@ +/* sphinx_rtd_theme version 0.4.2 | MIT license */ +/* Built 20181005 13:10 */ +*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}[hidden]{display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:hover,a:active{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;color:#000;text-decoration:none}mark{background:#ff0;color:#000;font-style:italic;font-weight:bold}pre,code,.rst-content tt,.rst-content code,kbd,samp{font-family:monospace,serif;_font-family:"courier 
new",monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:before,q:after{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}ul,ol,dl{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure{margin:0}form{margin:0}fieldset{border:0;margin:0;padding:0}label{cursor:pointer}legend{border:0;*margin-left:-7px;padding:0;white-space:normal}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0;*width:13px;*height:13px}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}textarea{overflow:auto;vertical-align:top;resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none !important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{html,body,section{background:none !important}*{box-shadow:none !important;text-shadow:none !important;filter:none !important;-ms-filter:none !important}a,a:visited{text-decoration:underline}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100% !important}@page{margin:.5cm}p,h2,.rst-content .toctree-wrapper p.caption,h3{orphans:3;widows:3}h2,.rst-content .toctree-wrapper p.caption,h3{page-break-after:avoid}}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning 
.wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition,.btn,input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"],select,textarea,.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a,.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a,.wy-nav-top a{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:'FontAwesome';src:url("../fonts/fontawesome-webfont.eot?v=4.7.0");src:url("../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff2?v=4.7.0") format("woff2"),url("../fonts/fontawesome-webfont.woff?v=4.7.0") format("woff"),url("../fonts/fontawesome-webfont.ttf?v=4.7.0") format("truetype"),url("../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular") format("svg");font-weight:normal;font-style:normal}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.3333333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.2857142857em;text-align:center}.fa-ul{padding-left:0;margin-left:2.1428571429em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.1428571429em;width:2.1428571429em;top:.1428571429em;text-align:center}.fa-li.fa-lg{left:-1.8571428571em}.fa-border{padding:.2em .25em .15em;border:solid 0.08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.wy-menu-vertical li span.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-left.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-left.toctree-expand,.rst-content .fa-pull-left.admonition-title,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content dl dt .fa-pull-left.headerlink,.rst-content p.caption .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download 
span.fa-pull-left:first-child,.rst-content code.download span.fa-pull-left:first-child,.fa-pull-left.icon{margin-right:.3em}.fa.fa-pull-right,.wy-menu-vertical li span.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a span.fa-pull-right.toctree-expand,.wy-menu-vertical li.current>a span.fa-pull-right.toctree-expand,.rst-content .fa-pull-right.admonition-title,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content dl dt .fa-pull-right.headerlink,.rst-content p.caption .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.rst-content code.download span.fa-pull-right:first-child,.fa-pull-right.icon{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.wy-menu-vertical li span.pull-left.toctree-expand,.wy-menu-vertical li.on a span.pull-left.toctree-expand,.wy-menu-vertical li.current>a span.pull-left.toctree-expand,.rst-content .pull-left.admonition-title,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content dl dt .pull-left.headerlink,.rst-content p.caption .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.rst-content code.download span.pull-left:first-child,.pull-left.icon{margin-right:.3em}.fa.pull-right,.wy-menu-vertical li span.pull-right.toctree-expand,.wy-menu-vertical li.on a span.pull-right.toctree-expand,.wy-menu-vertical li.current>a span.pull-right.toctree-expand,.rst-content .pull-right.admonition-title,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content dl dt .pull-right.headerlink,.rst-content p.caption .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.rst-content code.download span.pull-right:first-child,.pull-right.icon{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 
1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-remove:before,.fa-close:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-gear:before,.fa-cog:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-rotate-right:before,.fa-repeat:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content:""}.fa-times-circle:
before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.rst-content .admonition-title:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-warning:before,.fa-exclamation-triangle:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-gears:before,.fa-cogs:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.f
a-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-save:before,.fa-floppy-o:before{content:""}.fa-square:before{content:""}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magic:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.wy-dropdown .caret:before,.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-unsorted:before,.fa-sort:before{content:""}.fa-sort-down:before,.fa-sort-desc:before{content:""}.fa-sort-up:before,.fa-sort-asc:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-legal:before,.fa-gavel:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-flash:before,.fa-bolt:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-paste:before,.fa-clipboard:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-laptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-unlink:before,.fa-chain-broken:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{conte
nt:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:""}.fa-minus-square-o:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:""}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:""}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:""}.fa-euro:before,.fa-eur:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-rupee:before,.fa-inr:before{content:""}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:""}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:""}.fa-won:before,.fa-krw:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-apple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:be
fore{content:""}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-turkish-lira:before,.fa-try:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li span.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-institution:before,.fa-bank:before,.fa-university:before{content:""}.fa-mortar-board:before,.fa-graduation-cap:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-square:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:""}.fa-file-zip-o:before,.fa-file-archive-o:before{content:""}.fa-file-sound-o:before,.fa-file-audio-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:""}.fa-ge:before,.fa-empire:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-send:before,.fa-paper-plane:before{content:""}.fa-send-o:before,.fa-paper-plane-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{content:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:b
efore{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-mars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-hotel:before,.fa-bed:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-yc:before,.fa-y-combinator:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}
.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-tv:before,.fa-television:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{content:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:""}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-signing:before,.fa-sign-language:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-vcard:before,.fa-address-card:before{content:""}.fa-vcard-o:before,.fa-address-card-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:befor
e{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:before{content:""}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,.rst-content .admonition-title,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.rst-content code.download span:first-child,.icon,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context{font-family:inherit}.fa:before,.wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li.on a span.toctree-expand:before,.wy-menu-vertical li.current>a span.toctree-expand:before,.rst-content .admonition-title:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content dl dt .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before,.icon:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before{font-family:"FontAwesome";display:inline-block;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa,a .wy-menu-vertical li 
span.toctree-expand,.wy-menu-vertical li a span.toctree-expand,.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand,a .rst-content .admonition-title,.rst-content a .admonition-title,a .rst-content h1 .headerlink,.rst-content h1 a .headerlink,a .rst-content h2 .headerlink,.rst-content h2 a .headerlink,a .rst-content h3 .headerlink,.rst-content h3 a .headerlink,a .rst-content h4 .headerlink,.rst-content h4 a .headerlink,a .rst-content h5 .headerlink,.rst-content h5 a .headerlink,a .rst-content h6 .headerlink,.rst-content h6 a .headerlink,a .rst-content dl dt .headerlink,.rst-content dl dt a .headerlink,a .rst-content p.caption .headerlink,.rst-content p.caption a .headerlink,a .rst-content table>caption .headerlink,.rst-content table>caption a .headerlink,a .rst-content tt.download span:first-child,.rst-content tt.download a span:first-child,a .rst-content code.download span:first-child,.rst-content code.download a span:first-child,a .icon{display:inline-block;text-decoration:inherit}.btn .fa,.btn .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .btn span.toctree-expand,.btn .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .btn span.toctree-expand,.btn .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .btn span.toctree-expand,.btn .rst-content .admonition-title,.rst-content .btn .admonition-title,.btn .rst-content h1 .headerlink,.rst-content h1 .btn .headerlink,.btn .rst-content h2 .headerlink,.rst-content h2 .btn .headerlink,.btn .rst-content h3 .headerlink,.rst-content h3 .btn .headerlink,.btn .rst-content h4 .headerlink,.rst-content h4 .btn .headerlink,.btn .rst-content h5 .headerlink,.rst-content h5 .btn .headerlink,.btn .rst-content h6 .headerlink,.rst-content h6 .btn .headerlink,.btn .rst-content dl dt .headerlink,.rst-content dl dt .btn .headerlink,.btn .rst-content p.caption .headerlink,.rst-content p.caption .btn .headerlink,.btn .rst-content table>caption .headerlink,.rst-content table>caption .btn .headerlink,.btn .rst-content tt.download span:first-child,.rst-content tt.download .btn span:first-child,.btn .rst-content code.download span:first-child,.rst-content code.download .btn span:first-child,.btn .icon,.nav .fa,.nav .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .nav span.toctree-expand,.nav .wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.on a .nav span.toctree-expand,.nav .wy-menu-vertical li.current>a span.toctree-expand,.wy-menu-vertical li.current>a .nav span.toctree-expand,.nav .rst-content .admonition-title,.rst-content .nav .admonition-title,.nav .rst-content h1 .headerlink,.rst-content h1 .nav .headerlink,.nav .rst-content h2 .headerlink,.rst-content h2 .nav .headerlink,.nav .rst-content h3 .headerlink,.rst-content h3 .nav .headerlink,.nav .rst-content h4 .headerlink,.rst-content h4 .nav .headerlink,.nav .rst-content h5 .headerlink,.rst-content h5 .nav .headerlink,.nav .rst-content h6 .headerlink,.rst-content h6 .nav .headerlink,.nav .rst-content dl dt .headerlink,.rst-content dl dt .nav .headerlink,.nav .rst-content p.caption .headerlink,.rst-content p.caption .nav .headerlink,.nav .rst-content table>caption .headerlink,.rst-content table>caption .nav .headerlink,.nav .rst-content tt.download span:first-child,.rst-content tt.download .nav span:first-child,.nav .rst-content code.download span:first-child,.rst-content code.download .nav span:first-child,.nav .icon{display:inline}.btn .fa.fa-large,.btn .wy-menu-vertical li 
span.fa-large.toctree-expand,.wy-menu-vertical li .btn span.fa-large.toctree-expand,.btn .rst-content .fa-large.admonition-title,.rst-content .btn .fa-large.admonition-title,.btn .rst-content h1 .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.btn .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .btn .fa-large.headerlink,.btn .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .btn .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .btn span.fa-large:first-child,.btn .rst-content code.download span.fa-large:first-child,.rst-content code.download .btn span.fa-large:first-child,.btn .fa-large.icon,.nav .fa.fa-large,.nav .wy-menu-vertical li span.fa-large.toctree-expand,.wy-menu-vertical li .nav span.fa-large.toctree-expand,.nav .rst-content .fa-large.admonition-title,.rst-content .nav .fa-large.admonition-title,.nav .rst-content h1 .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.nav .rst-content dl dt .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.nav .rst-content p.caption .fa-large.headerlink,.rst-content p.caption .nav .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.nav .rst-content code.download span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.nav .fa-large.icon{line-height:.9em}.btn .fa.fa-spin,.btn .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .btn span.fa-spin.toctree-expand,.btn .rst-content .fa-spin.admonition-title,.rst-content .btn .fa-spin.admonition-title,.btn .rst-content h1 .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.btn .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .btn .fa-spin.headerlink,.btn .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .btn .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.btn .rst-content tt.download 
span.fa-spin:first-child,.rst-content tt.download .btn span.fa-spin:first-child,.btn .rst-content code.download span.fa-spin:first-child,.rst-content code.download .btn span.fa-spin:first-child,.btn .fa-spin.icon,.nav .fa.fa-spin,.nav .wy-menu-vertical li span.fa-spin.toctree-expand,.wy-menu-vertical li .nav span.fa-spin.toctree-expand,.nav .rst-content .fa-spin.admonition-title,.rst-content .nav .fa-spin.admonition-title,.nav .rst-content h1 .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.nav .rst-content dl dt .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.nav .rst-content p.caption .fa-spin.headerlink,.rst-content p.caption .nav .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.nav .rst-content code.download span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.nav .fa-spin.icon{display:inline-block}.btn.fa:before,.wy-menu-vertical li span.btn.toctree-expand:before,.rst-content .btn.admonition-title:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content dl dt .btn.headerlink:before,.rst-content p.caption .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.rst-content code.download span.btn:first-child:before,.btn.icon:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.wy-menu-vertical li span.btn.toctree-expand:hover:before,.rst-content .btn.admonition-title:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content p.caption .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.rst-content code.download span.btn:first-child:hover:before,.btn.icon:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .wy-menu-vertical li span.toctree-expand:before,.wy-menu-vertical li .btn-mini span.toctree-expand:before,.btn-mini .rst-content .admonition-title:before,.rst-content .btn-mini .admonition-title:before,.btn-mini .rst-content h1 .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.rst-content h4 .btn-mini .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.rst-content 
h5 .btn-mini .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.btn-mini .rst-content dl dt .headerlink:before,.rst-content dl dt .btn-mini .headerlink:before,.btn-mini .rst-content p.caption .headerlink:before,.rst-content p.caption .btn-mini .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.rst-content tt.download .btn-mini span:first-child:before,.btn-mini .rst-content code.download span:first-child:before,.rst-content code.download .btn-mini span:first-child:before,.btn-mini .icon:before{font-size:14px;vertical-align:-15%}.wy-alert,.rst-content .note,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .warning,.rst-content .seealso,.rst-content .admonition-todo,.rst-content .admonition{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.wy-alert-title,.rst-content .admonition-title{color:#fff;font-weight:bold;display:block;color:#fff;background:#6ab0de;margin:-12px;padding:6px 12px;margin-bottom:12px}.wy-alert.wy-alert-danger,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.admonition{background:#fdf3f2}.wy-alert.wy-alert-danger .wy-alert-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .danger .wy-alert-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .danger .admonition-title,.rst-content .error .admonition-title,.rst-content .wy-alert-danger.hint .admonition-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition .admonition-title{background:#f29f97}.wy-alert.wy-alert-warning,.rst-content .wy-alert-warning.note,.rst-content .attention,.rst-content .caution,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.tip,.rst-content .warning,.rst-content .wy-alert-warning.seealso,.rst-content .admonition-todo,.rst-content 
.wy-alert-warning.admonition{background:#ffedcc}.wy-alert.wy-alert-warning .wy-alert-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .attention .wy-alert-title,.rst-content .caution .wy-alert-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .wy-alert-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .attention .admonition-title,.rst-content .caution .admonition-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .warning .admonition-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .admonition-todo .admonition-title,.rst-content .wy-alert-warning.admonition .admonition-title{background:#f0b37e}.wy-alert.wy-alert-info,.rst-content .note,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.rst-content .seealso,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.admonition{background:#e7f2fa}.wy-alert.wy-alert-info .wy-alert-title,.rst-content .note .wy-alert-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.rst-content .note .admonition-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .seealso .admonition-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition .admonition-title{background:#6ab0de}.wy-alert.wy-alert-success,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .hint,.rst-content .important,.rst-content 
.tip,.rst-content .wy-alert-success.warning,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.admonition{background:#dbfaf4}.wy-alert.wy-alert-success .wy-alert-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .hint .wy-alert-title,.rst-content .important .wy-alert-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .hint .admonition-title,.rst-content .important .admonition-title,.rst-content .tip .admonition-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.seealso .admonition-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition .admonition-title{background:#1abc9c}.wy-alert.wy-alert-neutral,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.admonition{background:#f3f6f6}.wy-alert.wy-alert-neutral .wy-alert-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert-neutral.seealso .wy-alert-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content 
.wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition .admonition-title{color:#404040;background:#e1e4e5}.wy-alert.wy-alert-neutral a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a{color:#2980B9}.wy-alert p:last-child,.rst-content .note p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.rst-content .seealso p:last-child,.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0px;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,0.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27AE60}.wy-tray-container li.wy-tray-item-info{background:#2980B9}.wy-tray-container li.wy-tray-item-warning{background:#E67E22}.wy-tray-container li.wy-tray-item-danger{background:#E74C3C}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width: 768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px 12px;color:#fff;border:1px solid rgba(0,0,0,0.1);background-color:#27AE60;text-decoration:none;font-weight:normal;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:0px 1px 2px -1px rgba(255,255,255,0.5) inset,0px -2px 0px 0px rgba(0,0,0,0.1) inset;outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:0px -1px 0px 0px rgba(0,0,0,0.05) inset,0px 2px 0px 0px rgba(0,0,0,0.1) inset;padding:8px 12px 6px 12px}.btn:visited{color:#fff}.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = 
false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn-disabled:hover,.btn-disabled:focus,.btn-disabled:active{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980B9 !important}.btn-info:hover{background-color:#2e8ece !important}.btn-neutral{background-color:#f3f6f6 !important;color:#404040 !important}.btn-neutral:hover{background-color:#e5ebeb !important;color:#404040}.btn-neutral:visited{color:#404040 !important}.btn-success{background-color:#27AE60 !important}.btn-success:hover{background-color:#295 !important}.btn-danger{background-color:#E74C3C !important}.btn-danger:hover{background-color:#ea6153 !important}.btn-warning{background-color:#E67E22 !important}.btn-warning:hover{background-color:#e98b39 !important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f !important}.btn-link{background-color:transparent !important;color:#2980B9;box-shadow:none;border-color:transparent !important}.btn-link:hover{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:active{background-color:transparent !important;color:#409ad5 !important;box-shadow:none}.btn-link:visited{color:#9B59B6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:before,.wy-btn-group:after{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:solid 1px #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,0.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980B9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:solid 1px #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search input[type="search"]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980B9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned input,.wy-form-aligned textarea,.wy-form-aligned select,.wy-form-aligned .wy-help-inline,.wy-form-aligned label{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned 
.wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{border:0;margin:0;padding:0}legend{display:block;width:100%;border:0;padding:0;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label{display:block;margin:0 0 .3125em 0;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;*zoom:1;max-width:68em;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group:before,.wy-control-group:after{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#E74C3C}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full input[type="text"],.wy-control-group .wy-form-full input[type="password"],.wy-control-group .wy-form-full input[type="email"],.wy-control-group .wy-form-full input[type="url"],.wy-control-group .wy-form-full input[type="date"],.wy-control-group .wy-form-full input[type="month"],.wy-control-group .wy-form-full input[type="time"],.wy-control-group .wy-form-full input[type="datetime"],.wy-control-group .wy-form-full input[type="datetime-local"],.wy-control-group .wy-form-full input[type="week"],.wy-control-group .wy-form-full input[type="number"],.wy-control-group .wy-form-full input[type="search"],.wy-control-group .wy-form-full input[type="tel"],.wy-control-group .wy-form-full input[type="color"],.wy-control-group .wy-form-halves input[type="text"],.wy-control-group .wy-form-halves input[type="password"],.wy-control-group .wy-form-halves input[type="email"],.wy-control-group .wy-form-halves input[type="url"],.wy-control-group .wy-form-halves input[type="date"],.wy-control-group .wy-form-halves input[type="month"],.wy-control-group .wy-form-halves input[type="time"],.wy-control-group .wy-form-halves input[type="datetime"],.wy-control-group .wy-form-halves input[type="datetime-local"],.wy-control-group .wy-form-halves input[type="week"],.wy-control-group .wy-form-halves input[type="number"],.wy-control-group .wy-form-halves input[type="search"],.wy-control-group .wy-form-halves input[type="tel"],.wy-control-group .wy-form-halves input[type="color"],.wy-control-group .wy-form-thirds input[type="text"],.wy-control-group .wy-form-thirds input[type="password"],.wy-control-group .wy-form-thirds input[type="email"],.wy-control-group .wy-form-thirds input[type="url"],.wy-control-group .wy-form-thirds input[type="date"],.wy-control-group .wy-form-thirds input[type="month"],.wy-control-group .wy-form-thirds input[type="time"],.wy-control-group .wy-form-thirds input[type="datetime"],.wy-control-group .wy-form-thirds input[type="datetime-local"],.wy-control-group .wy-form-thirds input[type="week"],.wy-control-group .wy-form-thirds input[type="number"],.wy-control-group .wy-form-thirds input[type="search"],.wy-control-group .wy-form-thirds input[type="tel"],.wy-control-group .wy-form-thirds input[type="color"]{width:100%}.wy-control-group .wy-form-full{float:left;display:block;margin-right:2.3576515979%;width:100%;margin-right:0}.wy-control-group 
.wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.3576515979%;width:48.821174201%}.wy-control-group .wy-form-halves:last-child{margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(2n+1){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.3576515979%;width:31.7615656014%}.wy-control-group .wy-form-thirds:last-child{margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control{margin:6px 0 0 0;font-size:90%}.wy-control-no-input{display:inline-block;margin:6px 0 0 0;font-size:90%}.wy-control-group.fluid-input input[type="text"],.wy-control-group.fluid-input input[type="password"],.wy-control-group.fluid-input input[type="email"],.wy-control-group.fluid-input input[type="url"],.wy-control-group.fluid-input input[type="date"],.wy-control-group.fluid-input input[type="month"],.wy-control-group.fluid-input input[type="time"],.wy-control-group.fluid-input input[type="datetime"],.wy-control-group.fluid-input input[type="datetime-local"],.wy-control-group.fluid-input input[type="week"],.wy-control-group.fluid-input input[type="number"],.wy-control-group.fluid-input input[type="search"],.wy-control-group.fluid-input input[type="tel"],.wy-control-group.fluid-input input[type="color"]{width:100%}.wy-form-message-inline{display:inline-block;padding-left:.3em;color:#666;vertical-align:middle;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;*overflow:visible}input[type="text"],input[type="password"],input[type="email"],input[type="url"],input[type="date"],input[type="month"],input[type="time"],input[type="datetime"],input[type="datetime-local"],input[type="week"],input[type="number"],input[type="search"],input[type="tel"],input[type="color"]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type="datetime-local"]{padding:.34375em .625em}input[disabled]{cursor:default}input[type="checkbox"],input[type="radio"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type="search"]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}input[type="text"]:focus,input[type="password"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus{outline:0;outline:thin dotted 
\9;border-color:#333}input.no-focus:focus{border-color:#ccc !important}input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:1px auto #129FEA}input[type="text"][disabled],input[type="password"][disabled],input[type="email"][disabled],input[type="url"][disabled],input[type="date"][disabled],input[type="month"][disabled],input[type="time"][disabled],input[type="datetime"][disabled],input[type="datetime-local"][disabled],input[type="week"][disabled],input[type="number"][disabled],input[type="search"][disabled],input[type="tel"][disabled],input[type="color"][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#E74C3C;border:1px solid #E74C3C}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#E74C3C}input[type="file"]:focus:invalid:focus,input[type="radio"]:focus:invalid:focus,input[type="checkbox"]:focus:invalid:focus{outline-color:#E74C3C}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid #ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type="radio"][disabled],input[type="checkbox"][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:solid 1px #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{position:absolute;content:"";display:block;left:0;top:0;width:36px;height:12px;border-radius:4px;background:#ccc;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{position:absolute;content:"";display:block;width:18px;height:18px;border-radius:4px;background:#999;left:-3px;top:-3px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27AE60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#E74C3C}.wy-control-group.wy-control-group-error input[type="text"],.wy-control-group.wy-control-group-error input[type="password"],.wy-control-group.wy-control-group-error input[type="email"],.wy-control-group.wy-control-group-error input[type="url"],.wy-control-group.wy-control-group-error 
input[type="date"],.wy-control-group.wy-control-group-error input[type="month"],.wy-control-group.wy-control-group-error input[type="time"],.wy-control-group.wy-control-group-error input[type="datetime"],.wy-control-group.wy-control-group-error input[type="datetime-local"],.wy-control-group.wy-control-group-error input[type="week"],.wy-control-group.wy-control-group-error input[type="number"],.wy-control-group.wy-control-group-error input[type="search"],.wy-control-group.wy-control-group-error input[type="tel"],.wy-control-group.wy-control-group-error input[type="color"]{border:solid 1px #E74C3C}.wy-control-group.wy-control-group-error textarea{border:solid 1px #E74C3C}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27AE60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#E74C3C}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#E67E22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980B9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width: 480px){.wy-form button[type="submit"]{margin:.7em 0 0}.wy-form input[type="text"],.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:.3em;display:block}.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type="password"],.wy-form input[type="email"],.wy-form input[type="url"],.wy-form input[type="date"],.wy-form input[type="month"],.wy-form input[type="time"],.wy-form input[type="datetime"],.wy-form input[type="datetime-local"],.wy-form input[type="week"],.wy-form input[type="number"],.wy-form input[type="search"],.wy-form input[type="tel"],.wy-form input[type="color"]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0 0}.wy-form 
.wy-help-inline,.wy-form-message-inline,.wy-form-message{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width: 768px){.tablet-hide{display:none}}@media screen and (max-width: 480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.wy-table,.rst-content table.docutils,.rst-content table.field-list{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.wy-table caption,.rst-content table.docutils caption,.rst-content table.field-list caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td,.wy-table th,.rst-content table.docutils th,.rst-content table.field-list th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.wy-table td:first-child,.rst-content table.docutils td:first-child,.rst-content table.field-list td:first-child,.wy-table th:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list th:first-child{border-left-width:0}.wy-table thead,.rst-content table.docutils thead,.rst-content table.field-list thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.wy-table thead th,.rst-content table.docutils thead th,.rst-content table.field-list thead th{font-weight:bold;border-bottom:solid 2px #e1e4e5}.wy-table td,.rst-content table.docutils td,.rst-content table.field-list td{background-color:transparent;vertical-align:middle}.wy-table td p,.rst-content table.docutils td p,.rst-content table.field-list td p{line-height:18px}.wy-table td p:last-child,.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child{margin-bottom:0}.wy-table .wy-table-cell-min,.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min{width:1%;padding-right:0}.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox],.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:gray;font-size:90%}.wy-table-tertiary{color:gray;font-size:80%}.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) td,.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td{background-color:#f3f6f6}.wy-table-backed{background-color:#f3f6f6}.wy-table-bordered-all,.rst-content table.docutils{border:1px solid #e1e4e5}.wy-table-bordered-all td,.rst-content table.docutils td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.wy-table-bordered-all tbody>tr:last-child td,.rst-content table.docutils tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px 0;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0 !important}.wy-table-responsive table td,.wy-table-responsive table 
th{white-space:nowrap}a{color:#2980B9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9B59B6}html{height:100%;overflow-x:hidden}body{font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;font-weight:normal;color:#404040;min-height:100%;overflow-x:hidden;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#E67E22 !important}a.wy-text-warning:hover{color:#eb9950 !important}.wy-text-info{color:#2980B9 !important}a.wy-text-info:hover{color:#409ad5 !important}.wy-text-success{color:#27AE60 !important}a.wy-text-success:hover{color:#36d278 !important}.wy-text-danger{color:#E74C3C !important}a.wy-text-danger:hover{color:#ed7669 !important}.wy-text-neutral{color:#404040 !important}a.wy-text-neutral:hover{color:#595959 !important}h1,h2,.rst-content .toctree-wrapper p.caption,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif}p{line-height:24px;margin:0;font-size:16px;margin-bottom:24px}h1{font-size:175%}h2,.rst-content .toctree-wrapper p.caption{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}code,.rst-content tt,.rst-content code{white-space:nowrap;max-width:100%;background:#fff;border:solid 1px #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;color:#E74C3C;overflow-x:auto}code.code-large,.rst-content tt.code-large{font-size:90%}.wy-plain-list-disc,.rst-content .section ul,.rst-content .toctree-wrapper ul,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.wy-plain-list-disc li,.rst-content .section ul li,.rst-content .toctree-wrapper ul li,article ul li{list-style:disc;margin-left:24px}.wy-plain-list-disc li p:last-child,.rst-content .section ul li p:last-child,.rst-content .toctree-wrapper ul li p:last-child,article ul li p:last-child{margin-bottom:0}.wy-plain-list-disc li ul,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li ul,article ul li ul{margin-bottom:0}.wy-plain-list-disc li li,.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,article ul li li{list-style:circle}.wy-plain-list-disc li li li,.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,article ul li li li{list-style:square}.wy-plain-list-disc li ol li,.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,article ul li ol li{list-style:decimal}.wy-plain-list-decimal,.rst-content .section ol,.rst-content ol.arabic,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.wy-plain-list-decimal li,.rst-content .section ol li,.rst-content ol.arabic li,article ol li{list-style:decimal;margin-left:24px}.wy-plain-list-decimal li p:last-child,.rst-content .section ol li p:last-child,.rst-content ol.arabic li p:last-child,article ol li p:last-child{margin-bottom:0}.wy-plain-list-decimal li ul,.rst-content .section ol li ul,.rst-content ol.arabic li ul,article ol li ul{margin-bottom:0}.wy-plain-list-decimal li ul li,.rst-content .section ol li ul li,.rst-content ol.arabic li ul li,article ol li ul 
li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:before,.wy-breadcrumbs:after{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs li{display:inline-block}.wy-breadcrumbs li.wy-breadcrumbs-aside{float:right}.wy-breadcrumbs li a{display:inline-block;padding:5px}.wy-breadcrumbs li a:first-child{padding-left:0}.wy-breadcrumbs li code,.wy-breadcrumbs li .rst-content tt,.rst-content .wy-breadcrumbs li tt{padding:5px;border:none;background:none}.wy-breadcrumbs li code.literal,.wy-breadcrumbs li .rst-content tt.literal,.rst-content .wy-breadcrumbs li tt.literal{color:#404040}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width: 480px){.wy-breadcrumbs-extra{display:none}.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:before,.wy-menu-horiz:after{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz ul,.wy-menu-horiz li{display:inline-block}.wy-menu-horiz li:hover{background:rgba(255,255,255,0.1)}.wy-menu-horiz li.divide-left{border-left:solid 1px #404040}.wy-menu-horiz li.divide-right{border-right:solid 1px #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{height:32px;display:inline-block;line-height:32px;padding:0 1.618em;margin-bottom:0;display:block;font-weight:bold;text-transform:uppercase;font-size:80%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:solid 1px #404040}.wy-menu-vertical li.divide-bottom{border-bottom:solid 1px #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:gray;border-right:solid 1px #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.wy-menu-vertical li code,.wy-menu-vertical li .rst-content tt,.rst-content .wy-menu-vertical li tt{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li span.toctree-expand{display:block;float:left;margin-left:-1.2em;font-size:.8em;line-height:1.6em;color:#4d4d4d}.wy-menu-vertical li.on a,.wy-menu-vertical li.current>a{color:#404040;padding:.4045em 1.618em;font-weight:bold;position:relative;background:#fcfcfc;border:none;padding-left:1.618em -4px}.wy-menu-vertical li.on a:hover,.wy-menu-vertical li.current>a:hover{background:#fcfcfc}.wy-menu-vertical li.on a:hover span.toctree-expand,.wy-menu-vertical li.current>a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.on a span.toctree-expand,.wy-menu-vertical li.current>a span.toctree-expand{display:block;font-size:.8em;line-height:1.6em;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:solid 1px #c9c9c9;border-top:solid 1px #c9c9c9}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a{color:#404040}.wy-menu-vertical li.toctree-l1.current li.toctree-l2>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>ul{display:none}.wy-menu-vertical li.toctree-l1.current li.toctree-l2.current>ul,.wy-menu-vertical li.toctree-l2.current li.toctree-l3.current>ul{display:block}.wy-menu-vertical li.toctree-l2.current>a{background:#c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current 
li.toctree-l3>a{display:block;background:#c9c9c9;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l2 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l2 span.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3{font-size:.9em}.wy-menu-vertical li.toctree-l3.current>a{background:#bdbdbd;padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{display:block;background:#bdbdbd;padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l3 a:hover span.toctree-expand{color:gray}.wy-menu-vertical li.toctree-l3 span.toctree-expand{color:#969696}.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:normal}.wy-menu-vertical a{display:inline-block;line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover span.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980B9;cursor:pointer;color:#fff}.wy-menu-vertical a:active span.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980B9;text-align:center;padding:.809em;display:block;color:#fcfcfc;margin-bottom:.809em}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em auto;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-side-nav-search>a,.wy-side-nav-search .wy-dropdown>a{color:#fcfcfc;font-size:100%;font-weight:bold;display:inline-block;padding:4px 6px;margin-bottom:.809em}.wy-side-nav-search>a:hover,.wy-side-nav-search .wy-dropdown>a:hover{background:rgba(255,255,255,0.1)}.wy-side-nav-search>a img.logo,.wy-side-nav-search .wy-dropdown>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search>a.icon img.logo,.wy-side-nav-search .wy-dropdown>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:normal;color:rgba(255,255,255,0.3)}.wy-nav .wy-menu-vertical header{color:#2980B9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980B9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980B9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:before,.wy-nav-top:after{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:bold}.wy-nav-top 
img{margin-right:12px;height:45px;width:45px;background-color:#2980B9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,0.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:gray}footer p{margin-bottom:12px}footer span.commit code,footer span.commit .rst-content tt,.rst-content footer span.commit tt{padding:0px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:1em;background:none;border:none;color:gray}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:before,.rst-footer-buttons:after{width:100%}.rst-footer-buttons:before,.rst-footer-buttons:after{display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:before,.rst-breadcrumbs-buttons:after{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:solid 1px #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:solid 1px #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:gray;font-size:90%}@media screen and (max-width: 768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-side-scroll{width:auto}.wy-side-nav-search{width:auto}.wy-menu.wy-menu-vertical{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width: 1100px){.wy-nav-content-wrap{background:rgba(0,0,0,0.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,footer,.wy-nav-side{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .wy-menu-vertical li span.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version span.toctree-expand,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h4 .headerlink,.rst-content h4 .rst-versions .rst-current-version 
.headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content p.caption .headerlink,.rst-content p.caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .icon{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content img{max-width:100%;height:auto}.rst-content div.figure{margin-bottom:24px}.rst-content div.figure p.caption{font-style:italic}.rst-content div.figure p:last-child.caption{margin-bottom:0px}.rst-content div.figure.align-center{text-align:center}.rst-content .section>img,.rst-content .section>a>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;display:block;overflow:auto}.rst-content pre.literal-block,.rst-content div[class^='highlight']{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px 
0}.rst-content pre.literal-block div[class^='highlight'],.rst-content div[class^='highlight'] div[class^='highlight']{padding:0px;border:none;margin:0}.rst-content div[class^='highlight'] td.code{width:100%}.rst-content .linenodiv pre{border-right:solid 1px #e6e9ea;margin:0;padding:12px 12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^='highlight'] pre{white-space:pre;margin:0;padding:12px 12px;display:block;overflow:auto}.rst-content div[class^='highlight'] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content pre.literal-block,.rst-content div[class^='highlight'] pre,.rst-content .linenodiv pre{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;font-size:12px;line-height:1.4}@media print{.rst-content .codeblock,.rst-content div[class^='highlight'],.rst-content div[class^='highlight'] pre{white-space:pre-wrap}}.rst-content .note .last,.rst-content .attention .last,.rst-content .caution .last,.rst-content .danger .last,.rst-content .error .last,.rst-content .hint .last,.rst-content .important .last,.rst-content .tip .last,.rst-content .warning .last,.rst-content .seealso .last,.rst-content .admonition-todo .last,.rst-content .admonition .last{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,0.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent !important;border-color:rgba(0,0,0,0.1) !important}.rst-content .section ol.loweralpha,.rst-content .section ol.loweralpha li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha li{list-style:upper-alpha}.rst-content .section ol p,.rst-content .section ul p{margin-bottom:12px}.rst-content .section ol p:last-child,.rst-content .section ul p:last-child{margin-bottom:24px}.rst-content .line-block{margin-left:0px;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0px}.rst-content .topic-title{font-weight:bold;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0px 0px 24px 24px}.rst-content .align-left{float:left;margin:0px 24px 24px 0px}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content .toctree-wrapper p.caption .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content dl dt .headerlink,.rst-content p.caption .headerlink,.rst-content table>caption .headerlink{visibility:hidden;font-size:14px}.rst-content h1 .headerlink:after,.rst-content h2 .headerlink:after,.rst-content .toctree-wrapper p.caption .headerlink:after,.rst-content h3 .headerlink:after,.rst-content h4 .headerlink:after,.rst-content h5 .headerlink:after,.rst-content h6 .headerlink:after,.rst-content dl dt .headerlink:after,.rst-content p.caption .headerlink:after,.rst-content table>caption .headerlink:after{content:"";font-family:FontAwesome}.rst-content h1:hover .headerlink:after,.rst-content h2:hover .headerlink:after,.rst-content .toctree-wrapper p.caption:hover .headerlink:after,.rst-content h3:hover .headerlink:after,.rst-content h4:hover .headerlink:after,.rst-content h5:hover .headerlink:after,.rst-content h6:hover .headerlink:after,.rst-content dl dt:hover 
.headerlink:after,.rst-content p.caption:hover .headerlink:after,.rst-content table>caption:hover .headerlink:after{visibility:visible}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:solid 1px #e1e4e5}.rst-content .sidebar p,.rst-content .sidebar ul,.rst-content .sidebar dl{font-size:90%}.rst-content .sidebar .last{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:"Roboto Slab","ff-tisa-web-pro","Georgia",Arial,sans-serif;font-weight:bold;background:#e1e4e5;padding:6px 12px;margin:-24px;margin-bottom:24px;font-size:100%}.rst-content .highlighted{background:#F1C40F;display:inline-block;font-weight:bold;padding:0 6px}.rst-content .footnote-reference,.rst-content .citation-reference{vertical-align:baseline;position:relative;top:-0.4em;line-height:0;font-size:90%}.rst-content table.docutils.citation,.rst-content table.docutils.footnote{background:none;border:none;color:gray}.rst-content table.docutils.citation td,.rst-content table.docutils.citation tr,.rst-content table.docutils.footnote td,.rst-content table.docutils.footnote tr{border:none;background-color:transparent !important;white-space:normal}.rst-content table.docutils.citation td.label,.rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}.rst-content table.docutils.citation tt,.rst-content table.docutils.citation code,.rst-content table.docutils.footnote tt,.rst-content table.docutils.footnote code{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}.rst-content table.docutils td .last,.rst-content table.docutils td .last :last-child{margin-bottom:0}.rst-content table.field-list{border:none}.rst-content table.field-list td{border:none}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content tt,.rst-content tt,.rst-content code{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace;padding:2px 5px}.rst-content tt big,.rst-content tt em,.rst-content tt big,.rst-content code big,.rst-content tt em,.rst-content code em{font-size:100% !important;line-height:normal}.rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal{color:#E74C3C}.rst-content tt.xref,a .rst-content tt,.rst-content tt.xref,.rst-content code.xref,a .rst-content tt,a .rst-content code{font-weight:bold;color:#404040}.rst-content pre,.rst-content kbd,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",Courier,monospace}.rst-content a tt,.rst-content a tt,.rst-content a code{color:#2980B9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:bold;margin-bottom:12px}.rst-content dl p,.rst-content dl table,.rst-content dl ul,.rst-content dl ol{margin-bottom:12px !important}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content 
dl:not(.docutils){margin-bottom:24px}.rst-content dl:not(.docutils) dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980B9;border-top:solid 3px #6ab0de;padding:6px;position:relative}.rst-content dl:not(.docutils) dt:before{color:#6ab0de}.rst-content dl:not(.docutils) dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dl dt{margin-bottom:6px;border:none;border-left:solid 3px #ccc;background:#f0f0f0;color:#555}.rst-content dl:not(.docutils) dl dt .headerlink{color:#404040;font-size:100% !important}.rst-content dl:not(.docutils) dt:first-child{margin-top:0}.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) tt,.rst-content dl:not(.docutils) code{font-weight:bold}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname,.rst-content dl:not(.docutils) tt.descclassname,.rst-content dl:not(.docutils) code.descclassname{background-color:transparent;border:none;padding:0;font-size:100% !important}.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) tt.descname,.rst-content dl:not(.docutils) code.descname{font-weight:bold}.rst-content dl:not(.docutils) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:bold}.rst-content dl:not(.docutils) .property{display:inline-block;padding-right:8px}.rst-content .viewcode-link,.rst-content .viewcode-back{display:inline-block;color:#27AE60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:bold}.rst-content tt.download,.rst-content code.download{background:inherit;padding:inherit;font-weight:normal;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content tt.download span:first-child,.rst-content code.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content tt.download span:first-child:before,.rst-content code.download span:first-child:before{margin-right:4px}.rst-content .guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .versionmodified{font-style:italic}@media screen and (max-width: 480px){.rst-content .sidebar{width:100%}}span[id*='MathJax-Span']{color:#404040}.math{text-align:center}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-regular.eot");src:url("../fonts/Lato/lato-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-regular.woff2") format("woff2"),url("../fonts/Lato/lato-regular.woff") format("woff"),url("../fonts/Lato/lato-regular.ttf") format("truetype");font-weight:400;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bold.eot");src:url("../fonts/Lato/lato-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bold.woff2") format("woff2"),url("../fonts/Lato/lato-bold.woff") format("woff"),url("../fonts/Lato/lato-bold.ttf") format("truetype");font-weight:700;font-style:normal}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-bolditalic.eot");src:url("../fonts/Lato/lato-bolditalic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-bolditalic.woff2") format("woff2"),url("../fonts/Lato/lato-bolditalic.woff") format("woff"),url("../fonts/Lato/lato-bolditalic.ttf") 
format("truetype");font-weight:700;font-style:italic}@font-face{font-family:"Lato";src:url("../fonts/Lato/lato-italic.eot");src:url("../fonts/Lato/lato-italic.eot?#iefix") format("embedded-opentype"),url("../fonts/Lato/lato-italic.woff2") format("woff2"),url("../fonts/Lato/lato-italic.woff") format("woff"),url("../fonts/Lato/lato-italic.ttf") format("truetype");font-weight:400;font-style:italic}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:400;src:url("../fonts/RobotoSlab/roboto-slab.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-regular.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-regular.ttf") format("truetype")}@font-face{font-family:"Roboto Slab";font-style:normal;font-weight:700;src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot");src:url("../fonts/RobotoSlab/roboto-slab-v7-bold.eot?#iefix") format("embedded-opentype"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff2") format("woff2"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.woff") format("woff"),url("../fonts/RobotoSlab/roboto-slab-v7-bold.ttf") format("truetype")} diff --git a/docs/html/_static/doctools.js b/docs/html/_static/doctools.js new file mode 100644 index 0000000000..ffadbec11f --- /dev/null +++ b/docs/html/_static/doctools.js @@ -0,0 +1,315 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var bbox = span.getBBox(); + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + var parentOfText = node.parentNode.parentNode; + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { + this.initOnKeyListeners(); + } + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? 
+    return translated[Documentation.PLURAL_EXPR(n)];
+  },
+
+  addTranslations : function(catalog) {
+    for (var key in catalog.messages)
+      this.TRANSLATIONS[key] = catalog.messages[key];
+    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
+    this.LOCALE = catalog.locale;
+  },
+
+  /**
+   * add context elements like header anchor links
+   */
+  addContextElements : function() {
+    $('div[id] > :header:first').each(function() {
+      $('<a class="headerlink">\u00B6</a>').
+      attr('href', '#' + this.id).
+      attr('title', _('Permalink to this headline')).
+      appendTo(this);
+    });
+    $('dt[id]').each(function() {
+      $('<a class="headerlink">\u00B6</a>').
+      attr('href', '#' + this.id).
+      attr('title', _('Permalink to this definition')).
+      appendTo(this);
+    });
+  },
+
+  /**
+   * workaround a firefox stupidity
+   * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
+   */
+  fixFirefoxAnchorBug : function() {
+    if (document.location.hash && $.browser.mozilla)
+      window.setTimeout(function() {
+        document.location.href += '';
+      }, 10);
+  },
+
+  /**
+   * highlight the search words provided in the url in the text
+   */
+  highlightSearchWords : function() {
+    var params = $.getQueryParameters();
+    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
+    if (terms.length) {
+      var body = $('div.body');
+      if (!body.length) {
+        body = $('body');
+      }
+      window.setTimeout(function() {
+        $.each(terms, function() {
+          body.highlightText(this.toLowerCase(), 'highlighted');
+        });
+      }, 10);
+      $('<p class="highlight-link"><a href="javascript:Documentation.hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
+          .appendTo($('#searchbox'));
+    }
+  },
+
+  /**
+   * init the domain index toggle buttons
+   */
+  initIndexTable : function() {
+    var togglers = $('img.toggler').click(function() {
+      var src = $(this).attr('src');
+      var idnum = $(this).attr('id').substr(7);
+      $('tr.cg-' + idnum).toggle();
+      if (src.substr(-9) === 'minus.png')
+        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
+      else
+        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
+    }).css('display', '');
+    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
+      togglers.click();
+    }
+  },
+
+  /**
+   * helper function to hide the search marks again
+   */
+  hideSearchWords : function() {
+    $('#searchbox .highlight-link').fadeOut(300);
+    $('span.highlighted').removeClass('highlighted');
+  },
+
+  /**
+   * make the url absolute
+   */
+  makeURL : function(relativeURL) {
+    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
+  },
+
+  /**
+   * get the current relative url
+   */
+  getCurrentURL : function() {
+    var path = document.location.pathname;
+    var parts = path.split(/\//);
+    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
+      if (this === '..')
+        parts.pop();
+    });
+    var url = parts.join('/');
+    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
+  },
+
+  initOnKeyListeners: function() {
+    $(document).keyup(function(event) {
+      var activeElementType = document.activeElement.tagName;
+      // don't navigate when in search box or textarea
+      if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
+        switch (event.keyCode) {
+          case 37: // left
+            var prevHref = $('link[rel="prev"]').prop('href');
+            if (prevHref) {
+              window.location.href = prevHref;
+              return false;
+            }
+          case 39: // right
+            var nextHref = $('link[rel="next"]').prop('href');
+            if (nextHref) {
+              window.location.href = nextHref;
+              return false;
+            }
+        }
+      }
+    });
+  }
+};
+
+// quick alias for translations
+_ = Documentation.gettext;
+
+$(document).ready(function() {
+  Documentation.init();
+});
diff --git a/docs/html/_static/documentation_options.js b/docs/html/_static/documentation_options.js
new file mode 100644
index 0000000000..850a911304
--- /dev/null
+++ b/docs/html/_static/documentation_options.js
@@ -0,0 +1,10 @@
+var DOCUMENTATION_OPTIONS = {
+    URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
+    VERSION: '0.5',
+    LANGUAGE: 'None',
+    COLLAPSE_INDEX: false,
+    FILE_SUFFIX: '.html',
+    HAS_SOURCE: true,
+    SOURCELINK_SUFFIX: '.txt',
+    NAVIGATION_WITH_KEYS: false,
+};
\ No newline at end of file
diff --git a/docs/html/_static/down-pressed.png b/docs/html/_static/down-pressed.png
new file mode 100644
index 0000000000..5756c8cad8
Binary files /dev/null and b/docs/html/_static/down-pressed.png differ
diff --git a/docs/html/_static/down.png b/docs/html/_static/down.png
new file mode 100644
index 0000000000..1b3bdad2ce
Binary files /dev/null and b/docs/html/_static/down.png differ
diff --git a/docs/html/_static/favicon.ico b/docs/html/_static/favicon.ico
new file mode 100644
index 0000000000..bb439c0fcb
Binary files /dev/null and b/docs/html/_static/favicon.ico differ
diff --git a/docs/html/_static/file.png b/docs/html/_static/file.png
new file mode 100644
index 0000000000..a858a410e4
Binary files /dev/null and b/docs/html/_static/file.png differ
diff --git a/docs/html/_static/fonts/Inconsolata-Bold.ttf b/docs/html/_static/fonts/Inconsolata-Bold.ttf
new file mode 100644
index 0000000000..809c1f5828
Binary files /dev/null and b/docs/html/_static/fonts/Inconsolata-Bold.ttf differ
diff --git a/docs/html/_static/fonts/Inconsolata-Regular.ttf b/docs/html/_static/fonts/Inconsolata-Regular.ttf
new file mode 100644
index 0000000000..fc981ce7ad
Binary files /dev/null and b/docs/html/_static/fonts/Inconsolata-Regular.ttf differ
diff --git a/docs/html/_static/fonts/Inconsolata.ttf b/docs/html/_static/fonts/Inconsolata.ttf
new file mode 100644
index 0000000000..4b8a36d249
Binary files /dev/null and b/docs/html/_static/fonts/Inconsolata.ttf differ
diff --git a/docs/html/_static/fonts/Lato-Bold.ttf b/docs/html/_static/fonts/Lato-Bold.ttf
new file mode 100644
index 0000000000..1d23c7066e
Binary files /dev/null and b/docs/html/_static/fonts/Lato-Bold.ttf differ
diff --git a/docs/html/_static/fonts/Lato-Regular.ttf b/docs/html/_static/fonts/Lato-Regular.ttf
new file mode 100644
index 0000000000..0f3d0f837d
Binary files /dev/null and b/docs/html/_static/fonts/Lato-Regular.ttf differ
diff --git a/docs/html/_static/fonts/Lato/lato-bold.eot b/docs/html/_static/fonts/Lato/lato-bold.eot
new file mode 100644
index 0000000000..3361183a41
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bold.eot differ
diff --git a/docs/html/_static/fonts/Lato/lato-bold.ttf b/docs/html/_static/fonts/Lato/lato-bold.ttf
new file mode 100644
index 0000000000..29f691d5ed
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bold.ttf differ
diff --git a/docs/html/_static/fonts/Lato/lato-bold.woff b/docs/html/_static/fonts/Lato/lato-bold.woff
new file mode 100644
index 0000000000..c6dff51f06
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bold.woff differ
diff --git a/docs/html/_static/fonts/Lato/lato-bold.woff2 b/docs/html/_static/fonts/Lato/lato-bold.woff2
new file mode 100644
index 0000000000..bb195043cf
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bold.woff2 differ
diff --git a/docs/html/_static/fonts/Lato/lato-bolditalic.eot b/docs/html/_static/fonts/Lato/lato-bolditalic.eot
new file mode 100644
index 0000000000..3d4154936b
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bolditalic.eot differ
diff --git a/docs/html/_static/fonts/Lato/lato-bolditalic.ttf b/docs/html/_static/fonts/Lato/lato-bolditalic.ttf
new file mode 100644
index 0000000000..f402040b3e
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bolditalic.ttf differ
diff --git a/docs/html/_static/fonts/Lato/lato-bolditalic.woff b/docs/html/_static/fonts/Lato/lato-bolditalic.woff
new file mode 100644
index 0000000000..88ad05b9ff
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bolditalic.woff differ
diff --git a/docs/html/_static/fonts/Lato/lato-bolditalic.woff2 b/docs/html/_static/fonts/Lato/lato-bolditalic.woff2
new file mode 100644
index 0000000000..c4e3d804b5
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-bolditalic.woff2 differ
diff --git a/docs/html/_static/fonts/Lato/lato-italic.eot b/docs/html/_static/fonts/Lato/lato-italic.eot
new file mode 100644
index 0000000000..3f826421a1
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-italic.eot differ
diff --git a/docs/html/_static/fonts/Lato/lato-italic.ttf b/docs/html/_static/fonts/Lato/lato-italic.ttf
new file mode 100644
index 0000000000..b4bfc9b24a
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-italic.ttf differ
diff --git a/docs/html/_static/fonts/Lato/lato-italic.woff b/docs/html/_static/fonts/Lato/lato-italic.woff
new file mode 100644
index 0000000000..76114bc033
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-italic.woff differ
diff --git a/docs/html/_static/fonts/Lato/lato-italic.woff2 b/docs/html/_static/fonts/Lato/lato-italic.woff2
new file mode 100644
index 0000000000..3404f37e2e
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-italic.woff2 differ
diff --git a/docs/html/_static/fonts/Lato/lato-regular.eot b/docs/html/_static/fonts/Lato/lato-regular.eot
new file mode 100644
index 0000000000..11e3f2a5f0
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-regular.eot differ
diff --git a/docs/html/_static/fonts/Lato/lato-regular.ttf b/docs/html/_static/fonts/Lato/lato-regular.ttf
new file mode 100644
index 0000000000..74decd9ebb
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-regular.ttf differ
diff --git a/docs/html/_static/fonts/Lato/lato-regular.woff b/docs/html/_static/fonts/Lato/lato-regular.woff
new file mode 100644
index 0000000000..ae1307ff5f
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-regular.woff differ
diff --git a/docs/html/_static/fonts/Lato/lato-regular.woff2 b/docs/html/_static/fonts/Lato/lato-regular.woff2
new file mode 100644
index 0000000000..3bf9843328
Binary files /dev/null and b/docs/html/_static/fonts/Lato/lato-regular.woff2 differ
diff --git a/docs/html/_static/fonts/RobotoSlab-Bold.ttf b/docs/html/_static/fonts/RobotoSlab-Bold.ttf
new file mode 100644
index 0000000000..df5d1df273
Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab-Bold.ttf differ
diff --git a/docs/html/_static/fonts/RobotoSlab-Regular.ttf b/docs/html/_static/fonts/RobotoSlab-Regular.ttf
new file mode 100644
index 0000000000..eb52a79073
Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab-Regular.ttf differ
diff --git a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot
new file mode 100644
index 0000000000..79dc8efed3
Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot differ
a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf new file mode 100644 index 0000000000..df5d1df273 Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf differ diff --git a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff new file mode 100644 index 0000000000..6cb6000018 Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff differ diff --git a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 new file mode 100644 index 0000000000..7059e23142 Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 differ diff --git a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot new file mode 100644 index 0000000000..2f7ca78a1e Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot differ diff --git a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf new file mode 100644 index 0000000000..eb52a79073 Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf differ diff --git a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff new file mode 100644 index 0000000000..f815f63f99 Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff differ diff --git a/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 new file mode 100644 index 0000000000..f2c76e5bda Binary files /dev/null and b/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 differ diff --git a/docs/html/_static/fonts/fontawesome-webfont.eot b/docs/html/_static/fonts/fontawesome-webfont.eot new file mode 100644 index 0000000000..e9f60ca953 Binary files /dev/null and b/docs/html/_static/fonts/fontawesome-webfont.eot differ diff --git a/docs/html/_static/fonts/fontawesome-webfont.svg b/docs/html/_static/fonts/fontawesome-webfont.svg new file mode 100644 index 0000000000..855c845e53 --- /dev/null +++ b/docs/html/_static/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. 
+ [fontawesome-webfont.svg: the file's XML markup and ~2,650 lines of SVG glyph outlines were stripped during extraction; only the FontForge/copyright metadata above survived] diff --git a/docs/html/_static/fonts/fontawesome-webfont.ttf b/docs/html/_static/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000000..35acda2fa1 Binary files /dev/null and b/docs/html/_static/fonts/fontawesome-webfont.ttf differ diff --git a/docs/html/_static/fonts/fontawesome-webfont.woff b/docs/html/_static/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000000..400014a4b0 Binary files /dev/null and b/docs/html/_static/fonts/fontawesome-webfont.woff differ diff --git a/docs/html/_static/fonts/fontawesome-webfont.woff2 b/docs/html/_static/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000000..4d13fc6040 Binary files /dev/null and b/docs/html/_static/fonts/fontawesome-webfont.woff2 differ diff --git a/docs/html/_static/jquery-3.2.1.js b/docs/html/_static/jquery-3.2.1.js new file mode 100644 index 0000000000..d2d8ca4790 --- /dev/null +++ b/docs/html/_static/jquery-3.2.1.js @@ -0,0 +1,10253 @@ +/*! + * jQuery JavaScript Library v3.2.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2017-03-20T18:59Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ?
window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.2.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? 
[ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var 
name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors 
needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( el ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" + + "<select id='" + expando + "-\r\\' msallowcapture=''>" + + "<option selected=''></option></select>"; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll("[msallowcapture^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push("~="); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push(".#.+[+~]"); + } + }); + + assert(function( el ) { + el.innerHTML = "<a href='' disabled='disabled'></a>" + + "<select disabled='disabled'><option/></select>"; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement("input"); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); +
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
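+				// [Editor's note.] Legacy positional pseudos registered as set
+				// filters (e.g. ":first") need the whole candidate set, so they
+				// get the set-aware wrapper below; other old-signature pseudos
+				// keep the simple per-element form.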
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
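+			// [Editor's note — illustrative.] Per the spec quoted above,
+			// ":lang(en)" matches lang="en" as well as lang="en-US", and the
+			// do/while climbs ancestors until a lang value is found.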
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
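+		// [Editor's note — illustrative.] Negative arguments count from the
+		// end: in a 5-element set, ":gt(-2)" normalizes to index 3 and matches
+		// only index 4.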
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
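+	// [Editor's note.] "first" combinators (">" and "+") only need the single
+	// nearest node, handled by the cheap branch below; " " and "~" must walk
+	// the whole axis, so the second branch memoizes results per element.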
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
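+				// [Editor's note — illustrative.] For "div p:first" this
+				// rebuilds the leading "div " as "div *", keeping a valid
+				// prefix selector for the positional set matcher.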
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
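+	// [Editor's note.] Positional selectors (":eq", ":first", ...) need the
+	// full set, so i starts at 0 and the loop below is skipped; otherwise
+	// Sizzle seeds from the rightmost token that has a find() and filters
+	// leftward.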
0 : tokens.length;
+		while ( i-- ) {
+			token = tokens[i];
+
+			// Abort if we hit a combinator
+			if ( Expr.relative[ (type = token.type) ] ) {
+				break;
+			}
+			if ( (find = Expr.find[ type ]) ) {
+				// Search, expanding context for leading sibling combinators
+				if ( (seed = find(
+					token.matches[0].replace( runescape, funescape ),
+					rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
+				)) ) {
+
+					// If seed is empty or no tokens remain, we can return early
+					tokens.splice( i, 1 );
+					selector = seed.length && toSelector( tokens );
+					if ( !selector ) {
+						push.apply( results, seed );
+						return results;
+					}
+
+					break;
+				}
+			}
+		}
+	}
+
+	// Compile and execute a filtering function if one is not provided
+	// Provide `match` to avoid retokenization if we modified the selector above
+	( compiled || compile( selector, match ) )(
+		seed,
+		context,
+		!documentIsHTML,
+		results,
+		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+	);
+	return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert(function( el ) {
+	// Should return 1, but returns 4 (following)
+	return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
+});
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert(function( el ) {
+	el.innerHTML = "<a href='#'></a>";
+	return el.firstChild.getAttribute("href") === "#" ;
+}) ) {
+	addHandle( "type|href|height|width", function( elem, name, isXML ) {
+		if ( !isXML ) {
+			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+		}
+	});
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert(function( el ) {
+	el.innerHTML = "<input/>";
+	el.firstChild.setAttribute( "value", "" );
+	return el.firstChild.getAttribute( "value" ) === "";
+}) ) {
+	addHandle( "value", function( elem, name, isXML ) {
+		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+			return elem.defaultValue;
+		}
+	});
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert(function( el ) {
+	return el.getAttribute("disabled") == null;
+}) ) {
+	addHandle( booleans, function( elem, name, isXML ) {
+		var val;
+		if ( !isXML ) {
+			return elem[ name ] === true ? name.toLowerCase() :
+				(val = elem.getAttributeNode( name )) && val.specified ?
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
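+		// [Editor's note.] With several starting elements the per-element
+		// searches can return overlapping nodes, so multi-element sets are
+		// sorted and de-duplicated below.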
jQuery.uniqueSort( ret ) : ret;
+	},
+	filter: function( selector ) {
+		return this.pushStack( winnow( this, selector || [], false ) );
+	},
+	not: function( selector ) {
+		return this.pushStack( winnow( this, selector || [], true ) );
+	},
+	is: function( selector ) {
+		return !!winnow(
+			this,
+
+			// If this is a positional/relative selector, check membership in the returned set
+			// so $("p:first").is("p:last") won't return true for a doc with two "p".
+			typeof selector === "string" && rneedsContext.test( selector ) ?
+				jQuery( selector ) :
+				selector || [],
+			false
+		).length;
+	}
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+	// A simple way to check for HTML strings
+	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+	// Strict HTML recognition (#11290: must start with <)
+	// Shortcut simple #id case for speed
+	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+	init = jQuery.fn.init = function( selector, context, root ) {
+		var match, elem;
+
+		// HANDLE: $(""), $(null), $(undefined), $(false)
+		if ( !selector ) {
+			return this;
+		}
+
+		// Method init() accepts an alternate rootjQuery
+		// so migrate can support jQuery.sub (gh-2101)
+		root = root || rootjQuery;
+
+		// Handle HTML strings
+		if ( typeof selector === "string" ) {
+			if ( selector[ 0 ] === "<" &&
+				selector[ selector.length - 1 ] === ">" &&
+				selector.length >= 3 ) {
+
+				// Assume that strings that start and end with <> are HTML and skip the regex check
+				match = [ null, selector, null ];
+
+			} else {
+				match = rquickExpr.exec( selector );
+			}
+
+			// Match html or make sure no context is specified for #id
+			if ( match && ( match[ 1 ] || !context ) ) {
+
+				// HANDLE: $(html) -> $(array)
+				if ( match[ 1 ] ) {
+					context = context instanceof jQuery ? context[ 0 ] : context;
+
+					// Option to run scripts is true for back-compat
+					// Intentionally let the error be thrown if parseHTML is not present
+					jQuery.merge( this, jQuery.parseHTML(
+						match[ 1 ],
+						context && context.nodeType ? context.ownerDocument || context : document,
+						true
+					) );
+
+					// HANDLE: $(html, props)
+					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+						for ( match in context ) {
+
+							// Properties of context are called as methods if possible
+							if ( jQuery.isFunction( this[ match ] ) ) {
+								this[ match ]( context[ match ] );
+
+							// ...and otherwise set as attributes
+							} else {
+								this.attr( match, context[ match ] );
+							}
+						}
+					}
+
+					return this;
+
+				// HANDLE: $(#id)
+				} else {
+					elem = document.getElementById( match[ 2 ] );
+
+					if ( elem ) {
+
+						// Inject the element directly into the jQuery object
+						this[ 0 ] = elem;
+						this.length = 1;
+					}
+					return this;
+				}
+
+			// HANDLE: $(expr, $(...))
+			} else if ( !context || context.jquery ) {
+				return ( context || root ).find( selector );
+
+			// HANDLE: $(expr, context)
+			// (which is just equivalent to: $(context).find(expr)
+			} else {
+				return this.constructor( context ).find( selector );
+			}
+
+		// HANDLE: $(DOMElement)
+		} else if ( selector.nodeType ) {
+			this[ 0 ] = selector;
+			this.length = 1;
+			return this;
+
+		// HANDLE: $(function)
+		// Shortcut for document ready
+		} else if ( jQuery.isFunction( selector ) ) {
+			return root.ready !== undefined ?
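+			// [Editor's note — illustrative.] This is the classic
+			// document-ready shortcut: jQuery( function( $ ) { /* DOM ready */ } )
+			// lands here and is queued on the ready deferred.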
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
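+		// [Editor's note.] Document fragments (nodeType 11) are intentionally
+		// not reported as a parent here.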
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
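+		// [Editor's note — illustrative usage, not upstream code.]
+		//   var cb = jQuery.Callbacks( "once memory" );
+		//   cb.add( fn );    // fn runs on the single fire()
+		//   cb.fire( 1 );    // a later cb.add( g ) calls g( 1 ) immediately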
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
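+							// [Editor's note.] With no filter fn the original
+							// arguments pass through unchanged to the piped
+							// deferred; a returned promise was already unwrapped
+							// above.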
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
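+// [Editor's note — illustrative.] E.g. a TypeError thrown inside a .then()
+// handler surfaces via console.warn below rather than only rejecting silently.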
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
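+				// [Editor's note — illustrative.] Function values take
+				// ( index, currentValue ), e.g. .attr( "id", function( i, old ) {
+				//   return old + "-" + i;
+				// } ) flows through the second branch below.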
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? 
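+				// [Editor's note — illustrative.] An existing camelCased key is
+				// removed directly; otherwise the string splits on whitespace,
+				// so removeData( "foo bar" ) clears both entries.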
+ [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
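+ // The two Data stores keep internal state away from user data (a sketch;
+ // _data is the deprecated private accessor defined just below):
+ //
+ //   var fn = function() {};
+ //   var el = jQuery( "<div>" ).on( "click", fn ).data( "role", "admin" );
+ //   el.data( "role" );                  // "admin"            -> dataUser
+ //   jQuery._data( el[ 0 ], "events" );  // handler bookkeeping -> dataPriv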
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
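+ // Getter/setter behavior this implements, assuming markup like
+ // <div id="x" data-last-value="42" data-opts='{"a":1}'> (illustrative):
+ //
+ //   $( "#x" ).data( "lastValue" );      // 42 as a number: getData() coerced it
+ //   $( "#x" ).data( "opts" ).a;         // 1: rbrace matched, JSON.parse ran
+ //   $( "#x" ).data( "lastValue", 43 );  // updates the cache only; the
+ //                                       // data-last-value attribute keeps "42"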
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
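+ // fx queue sketch (element and durations illustrative): custom steps
+ // interleave with animations, and while one runs the "inprogress"
+ // sentinel handled in dequeue() above sits at index 0:
+ //
+ //   $( "#box" )
+ //       .fadeOut( 400 )
+ //       .queue( function( next ) {
+ //           console.log( "runs between the two effects" );
+ //           next();                     // hands control to the next step
+ //       } )
+ //       .fadeIn( 400 );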
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. 
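+ // Worked trace of this loop (numbers illustrative): the element is 160px
+ // wide, the requested unit is em, and 1em measures 16px. Starting from
+ // initialInUnit = 160:
+ //
+ //   apply 160em -> measures 2560px -> scale = 16 -> next guess 160 / 16 = 10
+ //   apply 10em  -> measures 160px  -> scale = 1  -> loop exits
+ //
+ // leaving initialInUnit === 10, the em equivalent of the 160px start value.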
+ // Use string for doubling so we don't accidentally see scale as unchanged below + scale = scale || ".5"; + + // Adjust and apply + initialInUnit = initialInUnit / scale; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Update scale, tolerating zero or NaN from tween.cur() + // Break the loop if scale is unchanged or perfect, or if we've just had enough. + } while ( + scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations + ); + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i ); + +var rscriptType = ( /^$|\/(?:java|ecma)script/i ); + + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // Support: IE <=9 only + option: [ 1, "<select multiple='multiple'>", "</select>" ], + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting <tbody> or other required elements. + thead: [ 1, "<table>", "</table>" ],
+ col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+ tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+ td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+ + _default: [ 0, "", "" ] +}; + +// Support: IE <=9 only +wrapMap.optgroup = wrapMap.option; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, contains, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps
(WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = "<textarea>x</textarea>"; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; +} )(); +var documentElement = document.documentElement; + + + +var + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 only +// See #13393 for more info +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas.
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). 
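+ // Namespace semantics this check produces (handler is any function):
+ //
+ //   $( document ).on( "click.menu", handler );
+ //   $( document ).trigger( "click" );        // fires: no namespace to match
+ //   $( document ).trigger( "click.menu" );   // fires: namespaces are a subset
+ //   $( document ).trigger( "click.other" );  // skipped by the test below
+ //   $( document ).off( ".menu" );            // namespace-only removal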
+ if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: jQuery.isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
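+ // Why this emulation works (sketch): mouseover bubbles while native
+ // mouseenter does not, so binding the bubbling type makes delegation
+ // possible, e.g. with any handler hoverIn:
+ //
+ //   $( "#list" ).on( "mouseenter", "li", hoverIn );
+ //
+ // The containment check below filters out mouseover noise from children,
+ // firing only when the pointer genuinely crosses the target's boundary.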
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + /* eslint-disable max-len */ + + // See https://github.com/eslint/eslint/issues/3229 + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi, + + /* eslint-enable */ + + // Support: IE <=10 - 11, Edge 12 - 13 + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /<script|<style|<link/i, + + // checked="checked" or checked + rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, + rscriptTypeMasked = /^true\/(.*)/, + rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { + + return jQuery( ">tbody", elem )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + + if ( match ) { + elem.type = match[ 1 ]; + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.access( src ); + pdataCur = dataPriv.set( dest, pdataOld ); + events = pdataOld.events; + + if ( events ) { + delete pdataCur.handle; + pdataCur.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button.
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && jQuery.contains( node.ownerDocument, node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html.replace( rxhtmlTag, "<$1></$2>" ); + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = jQuery.contains( elem.ownerDocument, elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ?
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
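+ // Inserting one collection into several targets (sketch): every target
+ // but the last receives a deep clone and the last gets the originals:
+ //
+ //   $( "<li>item</li>" ).appendTo( "ul" );  // 3 <ul>s -> 2 clones + original
+ //
+ // clone( true ) also copies data and handlers, so all copies behave alike.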
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + div.style.cssText = + "box-sizing:border-box;" + + "position:relative;display:block;" + + "margin:auto;border:1px;padding:1px;" + + "top:1%;width:50%"; + div.innerHTML = ""; + documentElement.appendChild( container ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = divStyle.marginLeft === "2px"; + boxSizingReliableVal = divStyle.width === "4px"; + + // Support: Android 4.0 - 4.3 only + // Some styles come back with percentage values, even though they shouldn't + div.style.marginRight = "50%"; + pixelMarginRightVal = divStyle.marginRight === "4px"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + + "padding:0;margin-top:1px;position:absolute"; + container.appendChild( div ); + + jQuery.extend( support, { + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelMarginRight: function() { + computeStyleTests(); + return pixelMarginRightVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }, + + cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style; + +// Return a css property mapped to a potentially vendor prefixed property +function vendorPropName( name ) { + + // Shortcut for names that are not vendor prefixed + if ( name in emptyStyle ) { + return name; + } + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a property mapped along what jQuery.cssProps suggests or to +// a vendor prefixed property. +function finalPropName( name ) { + var ret = jQuery.cssProps[ name ]; + if ( !ret ) { + ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; + } + return ret; +} + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { + var i, + val = 0; + + // If we already have the right measurement, avoid augmentation + if ( extra === ( isBorderBox ? "border" : "content" ) ) { + i = 4; + + // Otherwise initialize for horizontal or vertical properties + } else { + i = name === "width" ? 
1 : 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); + } + + if ( isBorderBox ) { + + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // At this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } else { + + // At this point, extra isn't content, so add padding + val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // At this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with computed style + var valueIsBorderBox, + styles = getStyles( elem ), + val = curCSS( elem, name, styles ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Computed unit is not pixels. Stop here and return. + if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Fall back to offsetWidth/Height when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + if ( val === "auto" ) { + val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; + } + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
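+ // CSS custom properties skip the camelCase and vendor-prefix mapping
+ // because their names are case-sensitive and user-defined (sketch):
+ //
+ //   $( ":root" ).css( "--brand-color", "#0af" );  // style.setProperty path
+ //   $( ":root" ).css( "--brand-color" );          // read back by exact name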
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
+ swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = jQuery.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( 
restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 
2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport 
); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + 
// If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + 
contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " + + + + + + +

ActionKit

+
+

Overview

+

ActionKit is a platform for advocacy, fundraising, and +get-out-the-vote. This Parsons integration with the +ActionKit REST API +supports fetching, creating, and updating records of campaigns, events, and users. +Bulk upload of new users and user updates is also supported.

+
+

Note

+
+
Authentication
+
ActionKit requires HTTP Basic Auth. +Clients with an ActionKit account can obtain the domain, username, and password needed +to access the ActionKit API. See the ActionKit REST API Authentication +documentation for more information on obtaining ActionKit API credentials.
+
+
+
+
+

Quickstart

+

To instantiate the ActionKit class, you can either store your ActionKit API +domain, username, and password as environmental variables (ACTION_KIT_DOMAIN, +ACTION_KIT_USERNAME, and ACTION_KIT_PASSWORD, respectively) or pass in your +domain, username, and password as arguments:

+
from parsons import ActionKit
+
+# First approach: Use API credentials via environmental variables
+ak = ActionKit()
+
+# Second approach: Pass API credentials as arguments
+ak = ActionKit(domain='myorg.actionkit.com', username='my_name', password='1234')
+
+
+

You can then call various endpoints:

+
# Create a new user
+ak.create_user(email='john@email', first_name='John', last_name='Smith', city='Boston')
+
+# Fetch user fields
+user_fields = ak.get_user(user_id='123')
+
+# Update user fields
+ak.update_user(user_id='123', city='New York')
+
+# Delete user
+ak.delete_user(user_id='123')
+
+
+
+
+

API

+
+
+class parsons.ActionKit(domain=None, username=None, password=None)[source]
+

Instantiate the ActionKit class

+
+
Args:
+
+
domain: str
+
The ActionKit domain (e.g. myorg.actionkit.com). Not required if +ACTION_KIT_DOMAIN env variable set.
+
username: str
+
The authorized ActionKit username. Not required if ACTION_KIT_USERNAME env +variable set.
+
password: str
+
The authorized ActionKit user password. Not required if ACTION_KIT_PASSWORD +env variable set.
+
+
+
+
+
+get_user(user_id)[source]
+

Get a user.

+
+
Args:
+
+
user_id: int
+
The user id of the record to get.
+
+
+
Returns:
+
User json object
+
+
+ +
+
+get_user_fields()[source]
+

Get list of valid user fields that can be passed with the +ActionKit.create_user() method.

+
+
Returns:
+
List of user fields
+
+
+ +
+
+create_user(email, **kwargs)[source]
+

Create a user.

+
+
Args:
+
+
email: str
+
Email for the user
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
User json object
+
+
+ +
+
+update_user(user_id, **kwargs)[source]
+

Update a user.

+
+
Args:
+
+
user_id: int
+
The user id of the person to update
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
None
+
+
+ +
+
+update_event(event_id, **kwargs)[source]
+

Update an event.

+
+
Args:
+
+
event_id: int
+
The event id of the event to update
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
None
+
+
+ +
+
+delete_user(user_id)[source]
+

Delete a user.

+
+
Args:
+
+
user_id: int
+
The user id of the person to delete
+
+
+
Returns:
+
None
+
+
+ +
+
+get_campaign(campaign_id)[source]
+

Get a campaign.

+
+
Args:
+
+
campaign_id: int
+
The campaign id of the record.
+
+
+
Returns:
+
Campaign json object
+
+
+ +
+
+get_campaign_fields()[source]
+

Get list of valid campaign fields that can be passed with the +ActionKit.create_campaign() and ActionKit.update_campaign() methods.

+
+
Returns:
+
List of campaign fields
+
+
+ +
+
+create_campaign(name, **kwargs)[source]
+

Create a campaign.

+
+
Args:
+
+
name: str
+
The name of the campaign to create
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
API location of new resource
+
+
+ +
+
+get_event_create_page(event_create_page_id)[source]
+

Get an event create page.

+
+
Args:
+
+
event_create_page_id: int
+
The event create page id of the record to get.
+
+
+
Returns:
+
Event create page json object
+
+
+ +
+
+get_event_create_page_fields()[source]
+

Get list of valid event create page fields that can be passed with the +ActionKit.create_event_create_page() method.

+
+
Returns:
+
List of event create page fields
+
+
+ +
+
+create_event_create_page(name, campaign_id, title, **kwargs)[source]
+

Add an event page to a campaign.

+
+
Args:
+
+
campaign_id: int
+
The campaign to associate the page with
+
name: str
+
The name of the page to create
+
title: str
+
The title of the page to create
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
API location of new resource
+
+
+ +
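For illustration, a hedged sketch chaining this method with create_campaign above. The campaign name, page name, title, and the id-parsing of the returned resource location are assumptions rather than documented behavior; ak is the ActionKit instance from the Quickstart.

+
# create_campaign returns the API location of the new campaign
+campaign_url = ak.create_campaign(name='house_parties')
+
+# Assumes a /rest/v1/campaign/<id>/ style location; the parsing is illustrative
+campaign_id = campaign_url.rstrip('/').split('/')[-1]
+ak.create_event_create_page(name='host_a_party',
+                            campaign_id=campaign_id,
+                            title='Host a House Party')
+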
+
+get_event_create_form(event_create_form_id)[source]
+

Get an event create form.

+
+
Args:
+
+
event_create_form_id: int
+
The event create form id of the record to get.
+
+
+
Returns:
+
Event create form json object
+
+
+ +
+
+get_event_create_form_fields()[source]
+

Get list of valid event create form fields that can be passed with the +ActionKit.create_event_create_form() method.

+
+
Returns:
+
List of event create form fields
+
+
+ +
+
+create_event_create_form(page_id, thank_you_text, **kwargs)[source]
+

Create an event create form.

+
+
Args:
+
+
page_id: int
+
The page to associate the form with
+
thank_you_text: str
+
Free form thank you text
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
API location of new resource
+
+
+ +
+
+get_event_signup_page(event_signup_page_id)[source]
+

Get event signup page.

+
+
Args:
+
+
event_signup_page_id: int
+
The event signup page id of the record to get.
+
+
+
Returns:
+
Event signup page json object
+
+
+ +
+
+get_event_signup_page_fields()[source]
+

Get list of valid event signup page fields that can be passed with the +ActionKit.create_event_signup_page() method.

+
+
Returns:
+
List of event signup page fields
+
+
+ +
+
+create_event_signup_page(name, campaign_id, title, **kwargs)[source]
+

Add an event signup page to a campaign.

+
+
Args:
+
+
campaign_id: int
+
The campaign to associate the page with
+
name: str
+
The name of the page to create
+
title: str
+
The title of the page to create
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
API location of new resource
+
+
+ +
+
+get_event_signup_form(event_signup_form_id)[source]
+

Get an event signup form.

+
+
Args:
+
+
event_signup_form_id: str
+
The event signup form id of the record to get.
+
+
+
Returns:
+
Event signup form json object
+
+
+ +
+
+get_event_signup_form_fields()[source]
+

Get list of valid event signup form fields that can be passed with the +ActionKit.create_event_signup_form() method.

+
+
Returns:
+
List of event signup form fields
+
+
+ +
+
+create_event_signup_form(page_id, thank_you_text, **kwargs)[source]
+

Create an event signup form.

+
+
Args:
+
+
page_id: int
+
The page to associate the form with
+
thank_you_text: str
+
Free form thank you text
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
API location of new resource
+
+
+ +
+
+update_event_signup(event_signup_id, **kwargs)[source]
+

Update an event signup.

+
+
Args:
+
+
event_signup_id: int
+
The id of the event signup to update
+
event_signup_dict: dict
+
A dictionary of fields to update for the event signup.
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
None
+
+
+ +
+
+get_page_followup(page_followup_id)[source]
+

Get a page followup.

+
+
Args:
+
+
page_followup_id: int
+
The page followup id of the record to get.
+
+
+
Returns:
+
Page followup json object
+
+
+ +
+
+get_page_followup_fields()[source]
+

Get list of valid page followup fields that can be passed with the +ActionKit.create_page_followup() method.

+
+
Returns:
+
List of page followup fields
+
+
+ +
+
+create_page_followup(signup_page_id, url, **kwargs)[source]
+

Add a page followup.

+
+
Args:
+
+
signup_page_id: int
+
The signup page to associate the followup page with
+
url: str
+
URL of the followup page
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
API location of new resource
+
+
+ +
+
+create_generic_action(page, email=None, ak_id=None, **kwargs)[source]
+

Post a generic action. One of ak_id or email is a required argument.

+
+
Args:
+
+
page:
+
The page to post the action. The page short name.
+
email:
+
The email address of the user to post the action.
+
ak_id:
+
The action kit id of the record.
+
**kwargs:
+
Optional arguments and fields to pass to the client. A full list can be found +in the ActionKit API Documentation.
+
+
+
Returns:
+
+
dict
+
The response json
+
+
+
+
+ +
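A minimal hedged sketch of posting an action; the page short name and email address are illustrative assumptions, and ak is the ActionKit instance from the Quickstart.

+
response = ak.create_generic_action(page='my_petition', email='supporter@example.org')
+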
+
+bulk_upload_csv(csv_file, import_page, autocreate_user_fields=False, user_fields_only=False)[source]
+

Bulk upload a csv file of new users or user updates. +If you are uploading a table object, use bulk_upload_table instead. +See the ActionKit User Upload Documentation. +Be careful: blank values in columns will overwrite existing data.

+

If you get a 500 error, try sending a much smaller file (say, one row), +which is more likely to return the proper 400 with a useful error message.

+
+
Args:
+
+
import_page: str
+
The page to post the action. The page short name.
+
csv_file: str or buffer
+
The csv (optionally zip’d) file path or a file buffer object. +A user_id or email column is required. +ActionKit rejects files that are larger than 128M.
+
autocreate_user_fields: bool
+
When True, columns starting with “user_” will be uploaded as user fields. +See the autocreate_user_fields documentation.
+
user_fields_only: bool
+
When uploading only an email/user_id column and user_ user fields, +ActionKit has a fast processing path. +This doesn’t work if you upload a zipped csv, though.
+
+
+
Returns:
+
+
dict
+
success: whether upload was successful +progress_url: an API URL to get progress on upload processing +res: requests http response object
+
+
+
+
+ +
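A hedged sketch of a csv upload, assuming an import page short-named 'my_import_page' and a local file with an email column plus user_-prefixed custom field columns.

+
result = ak.bulk_upload_csv('user_updates.csv', 'my_import_page',
+                            autocreate_user_fields=True)
+
+# 'success' and 'progress_url' come from the returned dict described above
+if result['success']:
+    print(result['progress_url'])
+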
+
+bulk_upload_table(table, import_page, autocreate_user_fields=0, no_overwrite_on_empty=False, set_only_columns=None)[source]
+

Bulk upload a table of new users or user updates. +See ActionKit User Upload Documentation +Be careful that blank values in columns will overwrite existing data.

+

Tables with only an identifying column (user_id/email) and user_ user fields +will be fast-processed – this is useful for setting/updating user fields.

+
+

Note

+

If you get a 500 error, try sending a much smaller file (say, one row), +which is more likely to return the proper 400 with a useful error message.

+
+
+
Args:
+
+
import_page: str
+
The page to post the action. The page short name.
+
table: Table Class
+
A Table of user data to bulk upload +A user_id or email column is required.
+
autocreate_user_fields: bool
+
When True, columns starting with “user_” will be uploaded as user fields to ActionKit. +See the autocreate_user_fields documentation.
+
no_overwrite_on_empty: bool
+
When uploading user data, ActionKit will, by default, take a blank value +and overwrite existing data for that user. +This can be undesirable, if the goal is to only send updates. +Setting this to True will divide up the table into multiple upload +batches, changing the columns uploaded based on permutations of +empty columns.
+
set_only_columns: list
+
This is similar to no_overwrite_on_empty but restricts to a specific set of columns +which, if blank, should not be overwritten.
+
+
+
Returns:
+
+
dict
+

success: bool – whether upload was successful (individual rows may not have been) +results: [dict] – This is a list of the full results.

+
+
progress_url and res for any results
+
+
+
+
+
+ +
+
+collect_upload_errors(result_array)[source]
+

Collect any upload errors as a list of objects from the bulk_upload_table ‘results’ key value. +This waits for uploads to complete, so it may take some time if you uploaded a large file.
Args:

+
+
+
result_array: list
+
After receiving a dict back from bulk_upload_table you may want to see if there +were any errors in the uploads. If you call collect_upload_errors(result_array) +it will iterate across each of the uploads fetching the final result of e.g. +/rest/v1/uploaderror?upload=123
+
+
+
+
Returns:
+
+
[dict]
+
message: str – error message +upload: str – upload progress API path e.g. “/rest/v1/upload/123456/” +id: int – upload error record id (different than upload id)
+
+
+
+
+ +
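A hedged sketch tying this method to bulk_upload_table above; tbl (a Parsons Table) and the import page short name are assumptions.

+
result = ak.bulk_upload_table(tbl, 'my_import_page', no_overwrite_on_empty=True)
+
+# Waits for processing to finish, then prints any per-row error messages
+for err in ak.collect_upload_errors(result['results']):
+    print(err['message'], err['upload'])
+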
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/aws.html b/docs/html/aws.html new file mode 100644 index 0000000000..5cb770bd8a --- /dev/null +++ b/docs/html/aws.html @@ -0,0 +1,1777 @@ + + + + + + + + + + + Amazon Web Services — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Amazon Web Services

+
+

Lambda

+
+

API

+
+
+parsons.aws.distribute_task(table, func_to_run, bucket=None, func_kwargs=None, func_class=None, func_class_kwargs=None, catch=False, group_count=100, storage='s3')[source]
+

Distribute processing rows in a table across multiple AWS Lambda invocations.

+

If you are running the processing of a table inside AWS Lambda, then you +are limited by how many rows can be processed within the Lambda’s time limit +(at time-of-writing, maximum 15min).

+

Based on experience and some napkin math, with +the same data that would allow 1000 rows to be processed inside a single +AWS Lambda instance, this method allows 10 MILLION rows to be processed.

+

Rather than converting the table to SQS +or other options, the fastest way is to upload the table to S3, and then +invoke multiple Lambda sub-invocations, each of which can be sent a +byte-range of the data in the S3 CSV file for which to process.

+

Using this method requires some setup. You have three tasks:

+
+
  1. Define the function to process rows; its first argument must take +your table’s data (though only a subset of rows will be passed) +(e.g. def task_for_distribution(table, **kwargs):)
  2. Where you would have run task_for_distribution(my_table, **kwargs), +instead call distribute_task(my_table, task_for_distribution, func_kwargs=kwargs) +(either setting env var S3_TEMP_BUCKET or passing a bucket= parameter)
  3. Set up your Lambda handler to include parsons.aws.event_command() +(or run and deploy your lambda with Zappa)
+

To test locally, include the argument storage=”local” which will test +the distribute_task function, but run the task sequentially and in local memory.

+

A minimalistic example Lambda handler might look something like this:

+
from parsons.aws import event_command, distribute_task
+
+def process_table(table, foo, bar=None):
+    for row in table:
+        do_sloooooow_thing(row, foo, bar)
+
+def handler(event, context):
+    ## ADD THESE TWO LINES TO TOP OF HANDLER:
+    if event_command(event, context):
+        return
+    table = FakeDatasource.load_to_table(username='123', password='abc')
+    # table is so big that running
+    #   process_table(table, foo=789, bar='baz') would timeout
+    # so instead we:
+    distribute_task(table, process_table,
+                    bucket='my-temp-s3-bucket',
+                    func_kwargs={'foo': 789, 'bar': 'baz'})
+
+
+
+
Args:
+
+
table: Parsons Table
+
Table of data you wish to distribute processing across Lambda invocations +of func_to_run argument.
+
func_to_run: function
+
The function you want to run whose +first argument will be a subset of table
+
bucket: str
+
The bucket name to use for s3 upload to process the whole table +Not required if you set environment variable S3_TEMP_BUCKET
+
func_kwargs: dict
+
If the function has other arguments to pass along with table +then provide them as a dict here. They must all be JSON-able.
+
func_class: class
+
If the function is a classmethod or function on a class, +then pass the pure class here. +E.g. If you passed ActionKit.bulk_upload_table, +then you would pass ActionKit here.
+
func_class_kwargs: dict
+
If it is a class function, and the class must be instantiated, +then pass the kwargs to instantiate the class here. +E.g. If you passed ActionKit.bulk_upload_table as the function, +then you would pass {‘domain’: …, ‘username’: … etc} here. +This must all be JSON-able data.
+
catch: bool
+
Lambda will retry running an event three times if there’s an +exception – if you want to prevent this, set catch=True +and then it will catch any errors and stop retries. +The error will be in CloudWatch logs with string “Distribute Error” +This might be important if row-actions are not idempotent and your +own function might fail causing repeats.
+
group_count: int
+
Set this to how many rows to process with each Lambda invocation (Default: 100)
+
storage: str
+
Debugging option: Defaults to “s3”. To test distribution locally without s3, +set to “local”.
+
+
+
Returns:
+
Debug information – do not rely on the output, as it will change +depending on how this method is invoked.
+
+
+ +
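A hedged local smoke test of the handler example above: with storage='local' the same call runs the task sequentially in memory, with no S3 bucket or Lambda fan-out involved.

+
# table, process_table, foo and bar are the names from the example handler
+distribute_task(table, process_table,
+                storage='local',
+                func_kwargs={'foo': 789, 'bar': 'baz'})
+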
+
+parsons.aws.event_command(event, context)[source]
+

Minimal shim to add to the top of your lambda handler function +to enable distributed tasks. +In your lambda handler:

+
from parsons.aws import event_command
+
+def handler(event, context):
+    ## ADD THESE TWO LINES TO TOP OF HANDLER:
+    if event_command(event, context):
+        return
+
+
+

The rest of this library is compatible with the zappa.async library. +If you have deployed your app with Zappa (https://github.com/Miserlou/Zappa), +then you do NOT need to add this shim.

+
+ +
+


+
+
+

S3

+
+

Overview

+

S3 is Amazon Web Services’ object storage service that allows users to store and access data objects. The Parsons class is a high-level wrapper of the AWS SDK boto3. It allows users to upload and download files from S3 as well as manipulate buckets.

+
+

Note

+
+
Authentication
+
Access to S3 is controlled through AWS Identity and Access Management (IAM) users in the AWS Management Console. Users can be granted granular access to AWS resources, including S3. IAM users are provisioned keys, which are required to access the S3 class.
+
+
+
+
+

QuickStart

+

Instantiate the class with credentials.

+
from parsons import S3
+
+# First approach: Use API credentials via environmental variables
+s3 = S3()
+
+# Second approach: Pass API credentials as arguments
+s3 = S3(aws_access_key_id='MY_KEY', aws_secret_access_key='MY_SECRET')
+
+# Third approach: Use credentials stored in AWS CLI file ~/.aws/credentials
+s3 = S3()
+
+
+

You can then call various endpoints:

+
from parsons import S3, Table
+
+s3 = S3(aws_access_key_id='MY_KEY', aws_secret_access_key='MY_SECRET')
+
+# Put an arbitrary file in an S3 bucket
+with open('winning_formula.csv') as w:
+  s3.put_file('my_bucket', 'winning.csv', w)
+
+# Put a Parsons Table as a CSV using convenience method.
+tbl = Table.from_csv('winning_formula.csv')
+tbl.to_s3_csv('my_bucket', 'winning.csv')
+
+# Download a csv file and convert to a table
+f = s3.get_file('my_bucket', 'my_dir/my_file.csv')
+tbl = Table(f)
+
+# List buckets that you have access to
+s3.list_buckets()
+
+# List the keys in a bucket
+s3.list_keys('my_bucket')
+
+
+
+
+

API

+
+
+class parsons.S3(aws_access_key_id=None, aws_secret_access_key=None)[source]
+

Instantiate the S3 class.

+
+
Args:
+
+
aws_access_key_id: str
+
The AWS access key id. Not required if the AWS_ACCESS_KEY_ID env variable +is set.
+
aws_secret_access_key: str
+
The AWS secret access key. Not required if the AWS_SECRET_ACCESS_KEY env +variable is set.
+
+
+
Returns:
+
S3 class.
+
+
+
+s3 = None
+

Boto3 API Session Resource object. Use for more advanced boto3 features.

+
+ +
+
+client = None
+

Boto3 API Session client object. Use for more advanced boto3 features.

+
+ +
+
+list_buckets()[source]
+

List all buckets to which you have access.

+
+
Returns:
+
list
+
+
+ +
+
+bucket_exists(bucket)[source]
+

Determine if a bucket exists and you have access to it.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
+
+
Returns:
+
+
boolean
+
True if the bucket exists and False if not.
+
+
+
+
+ +
+
+list_keys(bucket, prefix=None, suffix=None, regex=None, date_modified_before=None, date_modified_after=None, **kwargs)[source]
+

List the keys in a bucket, along with extra info about each one.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
prefix: str
+
Limits the response to keys that begin with the specified prefix.
+
suffix: str
+
Limits the response to keys that end with specified suffix
+
regex: str
+
Limits the response to keys that match a regex pattern
+
date_modified_before: datetime.datetime
+
Limits the response to keys with date modified before
+
date_modified_after: datetime.datetime
+
Limits the response to keys with date modified after
+
kwargs:
+
Additional arguments for the S3 API call. See AWS ListObjectsV2 documentation +for more info.
+
+
+
Returns:
+
+
dict
+
Dict mapping the keys to info about each key. The info includes ‘LastModified’, +‘Size’, and ‘Owner’.
+
+
+
+
+ +
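A hedged sketch of filtered listing; the bucket name, prefix, and one-day window are assumptions.

+
import datetime
+
+# csv keys under exports/ modified in the last day
+cutoff = datetime.datetime.now() - datetime.timedelta(days=1)
+recent = s3.list_keys('my_bucket', prefix='exports/', suffix='.csv',
+                      date_modified_after=cutoff)
+for key, info in recent.items():
+    print(key, info['Size'])
+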
+
+key_exists(bucket, key)[source]
+

Determine if a key exists in a bucket.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
+
+
Returns:
+
+
boolean
+
True if key exists and False if not.
+
+
+
+
+ +
+
+create_bucket(bucket)[source]
+

Create an s3 bucket.

+
+

Warning

+

S3 has a limit on the number of buckets you can create in an AWS account, and +that limit is fairly low (typically 100). If you are creating buckets frequently, +you may be mis-using S3, and should consider using the same bucket for multiple tasks. +There is no limit on the number of objects in a bucket. +See AWS bucket restrictions for more +info.

+
+
+

Warning

+

S3 bucket names are globally unique. So when creating a new bucket, +the name can’t collide with any existing bucket names. If the provided name does +collide, you’ll see errors like IllegalLocationConstraintException or +BucketAlreadyExists.

+
+
+
Args:
+
+
bucket: str
+
The name of the bucket to create
+
+
+
Returns:
+
None
+
+
+ +
+
+put_file(bucket, key, local_path, acl='bucket-owner-full-control', **kwargs)[source]
+

Uploads an object to an S3 bucket

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
local_path: str
+
The local path of the file to upload
+
acl: str
+
The S3 permissions on the file
+
kwargs:
+
Additional arguments for the S3 API call. See AWS Put Object documentation for more +info.
+
+
+
+
+ +
+
+remove_file(bucket, key)[source]
+

Deletes an object from an S3 bucket

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
+
+
Returns:
+
None
+
+
+ +
+
+get_file(bucket, key, local_path=None, **kwargs)[source]
+

Download an object from S3 to a local file

+
+
Args:
+
+
local_path: str
+
The local path where the file will be downloaded. If not specified, a temporary +file will be created and returned, and that file will be removed automatically +when the script is done running.
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
kwargs:
+
Additional arguments for the S3 API call. See AWS download_file documentation +for more info.
+
+
+
Returns:
+
+
str
+
The path of the new file
+
+
+
+
+ +
+
+get_url(bucket, key, expires_in=3600)[source]
+

Generates a presigned url for an s3 object.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object name
+
expires_in: int
+
The time, in seconds, until the url expires
+
+
+
Returns:
+
+
Url:
+
A link to download the object
+
+
+
+
+ +
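A hedged one-liner; the bucket and key are assumptions.

+
# Link expires after 15 minutes
+url = s3.get_url('my_bucket', 'my_dir/my_file.csv', expires_in=900)
+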
+
+transfer_bucket(origin_bucket, origin_key, destination_bucket, destination_key=None, suffix=None, regex=None, date_modified_before=None, date_modified_after=None, public_read=False, remove_original=False, **kwargs)[source]
+

Transfer files between s3 buckets.
Args:

+
+
+
origin_bucket: str
+
The origin bucket
+
origin_key: str
+
The origin file or prefix
+
destination_bucket: str
+
The destination bucket
+
destination_key: str
+
If None, the origin key is retained. If set to a prefix, all keys will be moved +to the new prefix.
+
suffix: str
+
Limits the response to keys that end with specified suffix
+
regex: str
+
Limits the response to keys that match a regex pattern
+
date_modified_before: datetime.datetime
+
Limits the response to keys with date modified before
+
date_modified_after: datetime.datetime
+
Limits the response to keys with date modified after
+
public_read: bool
+
If the keys should be set to public-read
+
remove_original: bool
+
If the original keys should be removed after transfer
+
kwargs:
+
Additional arguments for the S3 API call. See AWS download_file documentation +for more info.
+
+
+
+
Returns:
+
None
+
+
+ +
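A hedged sketch: copy the csv keys under a prefix into another bucket and remove the originals. All bucket and prefix names are assumptions.

+
s3.transfer_bucket('origin-bucket', 'exports/', 'destination-bucket',
+                   suffix='.csv', remove_original=True)
+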
+ +
+


+
+
+

Redshift

+
+

Overview

+

The Redshift class allows you to interact with an Amazon Redshift relational database. The Redshift Connector utilizes the psycopg2 python package to connect to the database.

+
+

Note

+
+
S3 Credentials
+
Redshift only allows data to be copied to the database via S3. As such, the copy() and copy_s3() +methods require S3 credentials and write access on an S3 Bucket, which will be used for storing data en route to +Redshift.
+
Whitelisting
+
Remember to ensure that the IP address from which you are connecting has been whitelisted.
+
+
+
+
+

Quickstart

+

Query the Database

+
from parsons import Redshift
+rs = Redshift()
+table = rs.query('select * from tmc_scratch.test_data')
+
+
+

Copy a Parsons Table to the Database

+
from parsons import Redshift
+rs = Redshift()
+table = rs.copy(tbl, 'tmc_scratch.test_table', if_exists='drop')
+
+
+

All of the standard copy options can be passed as kwargs. See the copy() method for all +options.

+
+
+

Core API

+

Redshift core methods focus on input, output and querying of the database.

+
+
+class parsons.Redshift(username=None, password=None, host=None, db=None, port=None, timeout=10, s3_temp_bucket=None, aws_access_key_id=None, aws_secret_access_key=None, iam_role=None)[source]
+

A Redshift class to connect to database.

+
+
Args:
+
+
username: str
+
Required if env variable REDSHIFT_USERNAME not populated
+
password: str
+
Required if env variable REDSHIFT_PASSWORD not populated
+
host: str
+
Required if env variable REDSHIFT_HOST not populated
+
db: str
+
Required if env variable REDSHIFT_DB not populated
+
port: int
+
Required if env variable REDSHIFT_PORT not populated. Port 5439 is typical.
+
timeout: int
+
Seconds to timeout if connection not established
+
s3_temp_bucket: str
+
Name of the S3 bucket that will be used for storing data during bulk transfers. +Required if you intend to perform bulk data transfers (eg. the copy_s3 method), +and env variable S3_TEMP_BUCKET is not populated.
+
aws_access_key_id: str
+
The default AWS access key id for copying data from S3 into Redshift +when running copy/upsert/etc methods. +This will default to environment variable AWS_ACCESS_KEY_ID.
+
aws_secret_access_key: str
+
The default AWS secret access key for copying data from S3 into Redshift +when running copy/upsert/etc methods. +This will default to environment variable AWS_SECRET_ACCESS_KEY.
+
iam_role: str
+
AWS IAM Role ARN string – an optional, different way for credentials to +be provided in the Redshift copy command that does not require an access key.
+
+
+
+
+ +
+
+parsons.Redshift.connection(self)
+

Generate a Redshift connection. +The connection is set up as a python “context manager”, so it will be closed +automatically (and all queries committed) when the connection goes out of scope.

+

When using the connection, make sure to put it in a with block (necessary for +any context manager): +with rs.connection() as conn:

+
+
Returns:
+
Psycopg2 connection object
+
+
+ +
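A hedged sketch of batching statements on one connection (see query_with_connection below); the scratch schema and table are assumptions. With commit=False, both statements are committed together when the with block closes.

+
with rs.connection() as conn:
+    rs.query_with_connection('create table tmc_scratch.demo (id int)', conn, commit=False)
+    rs.query_with_connection('insert into tmc_scratch.demo values (1)', conn, commit=False)
+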
+
+parsons.Redshift.query(self, sql, parameters=None)
+

Execute a query against the Redshift database. Will return None +if the query returns zero rows.

+

To include python variables in your query, it is recommended to pass them as parameters, +following the psycopg style. +Using the parameters argument ensures that values are escaped properly, and avoids SQL +injection attacks.

+

Parameter Examples

+
# Note that the name contains a quote, which could break your query if not escaped
+# properly.
+name = "Beatrice O'Brady"
+sql = "SELECT * FROM my_table WHERE name = %s"
+rs.query(sql, parameters=[name])
+
+
+
names = ["Allen Smith", "Beatrice O'Brady", "Cathy Thompson"]
+placeholders = ', '.join('%s' for item in names)
+sql = f"SELECT * FROM my_table WHERE name IN ({placeholders})"
+rs.query(sql, parameters=names)
+
+
+
+
Args:
+
+
sql: str
+
A valid SQL statement
+
parameters: list
+
A list of python variables to be converted into SQL values in your query
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+parsons.Redshift.query_with_connection(self, sql, connection, parameters=None, commit=True)
+

Execute a query against the Redshift database, with an existing connection. +Useful for batching queries together. Will return None if the query +returns zero rows.

+
+
Args:
+
+
sql: str
+
A valid SQL statement
+
connection: obj
+
A connection object obtained from redshift.connection()
+
parameters: list
+
A list of python variables to be converted into SQL values in your query
+
commit: boolean
+
Whether to commit the transaction immediately. If False the transaction will +be committed when the connection goes out of scope and is closed (or you can +commit manually with connection.commit()).
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+parsons.Redshift.copy(self, tbl, table_name, if_exists='fail', max_errors=0, distkey=None, sortkey=None, padding=None, statupdate=False, compupdate=True, acceptanydate=True, emptyasnull=True, blanksasnull=True, nullas=None, acceptinvchars=True, dateformat='auto', timeformat='auto', varchar_max=None, truncatecolumns=False, columntypes=None, specifycols=None, alter_table=False, aws_access_key_id=None, aws_secret_access_key=None, iam_role=None, cleanup_s3_file=True, template_table=None, temp_bucket_region=None)
+

Copy a Parsons Table to Redshift.

+
+
Args:
+
+
tbl: obj
+
A Parsons Table.
+
table_name: str
+
The destination table name (ex. my_schema.my_table).
+
if_exists: str
+
If the table already exists, either fail, append, drop +or truncate the table.
+
max_errors: int
+
The maximum number of rows that can error and be skipped before +the job fails.
+
distkey: str
+
The column name of the distkey
+
sortkey: str
+
The column name of the sortkey
+
padding: float
+
A percentage padding to add to varchar columns if creating a new table. This is +helpful to add a buffer for future copies in which the data might be wider.
+
varchar_max: list
+
A list of columns in which to set the width of the varchar column to 65,535 +characters.
+
statupdate: boolean
+
Governs automatic computation and refresh of optimizer statistics at the end +of a successful COPY command.
+
compupdate: boolean
+
Controls whether compression encodings are automatically applied during a COPY.
+
acceptanydate: boolean
+
Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be +loaded without generating an error.
+
emptyasnull: boolean
+
Indicates that Amazon Redshift should load empty char and varchar fields +as NULL.
+
blanksasnull: boolean
+
Loads blank varchar fields, which consist of only white space characters, +as NULL.
+
nullas: str
+
Loads fields that match string as NULL
+
acceptinvchars: boolean
+
Enables loading of data into VARCHAR columns even if the data contains +invalid UTF-8 characters.
+
dateformat: str
+
Set the date format. Defaults to auto.
+
timeformat: str
+
Set the time format. Defaults to auto.
+
truncatecolumns: boolean
+
If the table already exists, truncates data in columns to the appropriate number +of characters so that it fits the column specification. Applies only to columns +with a VARCHAR or CHAR data type, and rows 4 MB or less in size.
+
columntypes: dict
+
Optional map of column name to redshift column type, overriding the usual type +inference. You only specify the columns you want to override, e.g. +columntypes={'phone': 'varchar(12)', 'age': 'int'}.
+
specifycols: boolean
+

Adds a column list to the Redshift COPY command, allowing for the source table +in an append to have the columns out of order, and to have fewer columns with any +leftover target table columns filled in with the DEFAULT value.

+

This will fail if all of the source table’s columns do not match a column in the +target table. This will also fail if the target table has an IDENTITY +column and that column name is among the source table’s columns.

+
+
alter_table: boolean
+
Will check if the target table varchar widths are wide enough to copy in the +table data. If not, will attempt to alter the table to make it wide enough. This +will not work with tables that have dependent views.
+
aws_access_key_id:
+
An AWS access key granted to the bucket where the file is located. Not required +if keys are stored as environmental variables.
+
aws_secret_access_key:
+
An AWS secret access key granted to the bucket where the file is located. Not +required if keys are stored as environmental variables.
+
iam_role: str
+
An AWS IAM Role ARN string; an alternative credential for the COPY command +from Redshift to S3. The IAM role must have been assigned to the Redshift +instance and have access to the S3 bucket.
+
cleanup_s3_file: boolean
+
The s3 upload is removed by default on cleanup. You can set to False for debugging.
+
template_table: str
+
Instead of specifying columns, columntypes, and/or inference, if there +is a pre-existing table that has the same columns/types, then use the template_table +table name as the schema for the new table. +Unless you set specifycols=False explicitly, a template_table will set it to True
+
temp_bucket_region: str
+
The AWS region that the temp bucket (specified by the TEMP_S3_BUCKET environment +variable) is located in. This should be provided if the Redshift cluster is located +in a different region from the temp bucket.
+
+
+
Returns:
+
+
Parsons Table or None
+
See Parsons Table for output options.
+
+
+
+
+ +
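A hedged sketch combining a few of the options above; the table name, column names, and types are assumptions.

+
# Append to an existing table, pad new varchar widths by 25%, and override two column types
+rs.copy(tbl, 'my_schema.my_table', if_exists='append', padding=0.25,
+        columntypes={'phone': 'varchar(12)', 'age': 'int'})
+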
+
+parsons.Redshift.copy_s3(self, table_name, bucket, key, manifest=False, data_type='csv', csv_delimiter=',', compression=None, if_exists='fail', max_errors=0, distkey=None, sortkey=None, padding=None, varchar_max=None, statupdate=True, compupdate=True, ignoreheader=1, acceptanydate=True, dateformat='auto', timeformat='auto', emptyasnull=True, blanksasnull=True, nullas=None, acceptinvchars=True, truncatecolumns=False, columntypes=None, specifycols=None, aws_access_key_id=None, aws_secret_access_key=None, bucket_region=None)
+

Copy a file from s3 to Redshift.

+
+
Args:
+
+
table_name: str
+
The table name and schema (e.g. tmc.cool_table) to load the file into.
+
bucket: str
+
The s3 bucket where the file or manifest is located.
+
key: str
+
The key of the file or manifest in the s3 bucket.
+
manifest: boolean
+
If True, the key is treated as a manifest file listing the data files to load.
+
data_type: str
+
The data type of the file. Only csv supported currently.
+
csv_delimiter: str
+
The delimiter of the csv. Only relevant if data_type is csv.
+
compression: str
+
If specified (gzip), will attempt to decompress the file.
+
if_exists: str
+
If the table already exists, either fail, append, drop +or truncate the table.
+
max_errors: int
+
The maximum number of rows that can error and be skipped before +the job fails.
+
distkey: str
+
The column name of the distkey
+
sortkey: str
+
The column name of the sortkey
+
padding: float
+
A percentage padding to add to varchar columns if creating a new table. This is +helpful to add a buffer for future copies in which the data might be wider.
+
varchar_max: list
+
A list of columns in which to set the width of the varchar column to 65,535 +characters.
+
statupdate: boolean
+
Governs automatic computation and refresh of optimizer statistics at the end +of a successful COPY command.
+
compupdate: boolean
+
Controls whether compression encodings are automatically applied during a COPY.
+
ignoreheader: int
+
The number of header rows to skip. Ignored if data_type is json.
+
acceptanydate: boolean
+
Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be +loaded without generating an error.
+
emptyasnull: boolean
+
Indicates that Amazon Redshift should load empty char and varchar fields +as NULL.
+
blanksasnull: boolean
+
Loads blank varchar fields, which consist of only white space characters, +as NULL.
+
nullas: str
+
Loads fields that match the given string as NULL.
+
acceptinvchars: boolean
+
Enables loading of data into VARCHAR columns even if the data contains +invalid UTF-8 characters.
+
dateformat: str
+
Set the date format. Defaults to auto.
+
timeformat: str
+
Set the time format. Defaults to auto.
+
truncatecolumns: boolean
+
If the table already exists, truncates data in columns to the appropriate number +of characters so that it fits the column specification. Applies only to columns +with a VARCHAR or CHAR data type, and rows 4 MB or less in size.
+
columntypes: dict
+
Optional map of column name to redshift column type, overriding the usual type inference. You only specify the columns you want to override, e.g. columntypes={'phone': 'varchar(12)', 'age': 'int'}.
+
specifycols: boolean
+

Adds a column list to the Redshift COPY command, allowing for the source table in an append to have the columns out of order, and to have fewer columns with any leftover target table columns filled in with the DEFAULT value.

+

This will fail if any of the source table's columns do not match a column in the target table. This will also fail if the target table has an IDENTITY column and that column name is among the source table's columns.

+
+
aws_access_key_id:
+
An AWS access key granted to the bucket where the file is located. Not required +if keys are stored as environmental variables.
+
aws_secret_access_key:
+
An AWS secret access key granted to the bucket where the file is located. Not +required if keys are stored as environmental variables.
+
bucket_region: str
+
The AWS region that the bucket is located in. This should be provided if the +Redshift cluster is located in a different region from the temp bucket.
+
+
+
Returns
+
+
Parsons Table or None
+
See Parsons Table for output options.
+
+
+
+
+ +
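As an illustrative sketch (the bucket, key, and table names here are hypothetical), a typical copy_s3 call looks like:

from parsons import Redshift

rs = Redshift()

# Load s3://my-bucket/exports/contacts.csv into tmc.contacts,
# appending to the table if it already exists.
rs.copy_s3('tmc.contacts', bucket='my-bucket', key='exports/contacts.csv',
           if_exists='append')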
+
+parsons.Redshift.unload(self, sql, bucket, key_prefix, manifest=True, header=True, delimiter='|', compression='gzip', add_quotes=True, null_as=None, escape=True, allow_overwrite=True, parallel=True, max_file_size='6.2 GB', aws_region=None, aws_access_key_id=None, aws_secret_access_key=None)
+

Unload Redshift data to an S3 bucket. This is a more efficient method than running a query to export data, as it can export in parallel and write directly into an S3 bucket. Consider using this for exports of 10MM or more rows.

+
+
sql: str
+
The SQL string to execute to generate the data to unload.
+
bucket: str
+
The destination S3 bucket
+
key_prefix: str
+
The prefix of the key names that will be written
+
manifest: boolean
+
Creates a manifest file that explicitly lists details for the data files +that are created by the UNLOAD process.
+
header: boolean
+
Adds a header line containing column names at the top of each output file.
+
delimiter: str
+
Specifies the character used to separate fields. Defaults to '|'.
+
compression: str
+
One of gzip, bzip2 or None. Unloads data to one or more compressed +files per slice. Each resulting file is appended with a .gz or .bz2 extension.
+
add_quotes: boolean
+
Places quotation marks around each unloaded data field, so that Amazon Redshift +can unload data values that contain the delimiter itself.
+
null_as: str
+
Specifies a string that represents a null value in unload files. If this option is +not specified, null values are unloaded as zero-length strings for delimited output.
+
escape: boolean
+
For CHAR and VARCHAR columns in delimited unload files, an escape character (\) is placed before every linefeed, carriage return, escape character, and delimiter.
+
allow_overwrite: boolean
+
If True, will overwrite existing files, including the manifest file. If False +will fail.
+
parallel: boolean
+
By default, UNLOAD writes data in parallel to multiple files, according to the number +of slices in the cluster. The default option is ON or TRUE. If PARALLEL is OFF or +FALSE, UNLOAD writes to one or more data files serially, sorted absolutely according +to the ORDER BY clause, if one is used.
+
max_file_size: str
+
The maximum size of files UNLOAD creates in Amazon S3. Specify a decimal value between +5 MB and 6.2 GB.
+
aws_region: str
+
The AWS Region where the target Amazon S3 bucket is located. REGION is required for +UNLOAD to an Amazon S3 bucket that is not in the same AWS Region as the Amazon Redshift +cluster.
+
aws_access_key_id:
+
An AWS access key granted to the bucket where the file is located. Not required +if keys are stored as environmental variables.
+
aws_secret_access_key:
+
An AWS secret access key granted to the bucket where the file is located. Not +required if keys are stored as environmental variables.
+
+
+ +
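A minimal usage sketch, assuming hypothetical bucket and prefix names:

from parsons import Redshift

rs = Redshift()

# Export a large query result directly to S3, gzipped and in parallel.
rs.unload('SELECT * FROM my_schema.big_table',
          bucket='my-bucket',
          key_prefix='unloads/big_table_')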
+
+parsons.Redshift.upsert(self, table_obj, target_table, primary_key, vacuum=True, distinct_check=True, cleanup_temp_table=True, alter_table=True, **copy_args)Âś
+

Perform an upsert on an existing table. An upsert is an operation in which records in a table are updated and inserted at the same time. Unlike in some other SQL databases, it does not exist natively in Redshift.

+
+
Args:
+
+
table_obj: obj
+
A Parsons table object
+
target_table: str
+
The schema and table name to upsert
+
primary_key: str or list
+
The primary key column(s) of the target table
+
vacuum: boolean
+
Re-sorts rows and reclaims space in the specified table. You must be a table owner or superuser to effectively vacuum a table; however, the method will not fail if you lack these privileges.
+
distinct_check: boolean
+
Check if the primary key column is distinct. Raise error if not.
+
cleanup_temp_table: boolean
+
A temp table is dropped by default on cleanup. You can set to False for debugging.
+
**copy_args: kwargs
+
See copy() for options.
+
+
+
+
+ +
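A short sketch of an upsert keyed on a hypothetical id column:

from parsons import Redshift, Table

rs = Redshift()
tbl = Table([{'id': 1, 'name': 'Jane'}, {'id': 2, 'name': 'Bob'}])

# Rows whose id already exists in the target are updated; new ids are inserted.
rs.upsert(tbl, 'my_schema.people', primary_key='id')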
+
+parsons.Redshift.generate_manifest(self, buckets, aws_access_key_id=None, aws_secret_access_key=None, mandatory=True, prefix=None, manifest_bucket=None, manifest_key=None, path=None)Âś
+

Given a list of S3 buckets, generate a manifest file (JSON format). A manifest file +allows you to copy multiple files into a single table at once. Once the manifest is +generated, you can pass it with the copy_s3() method.

+

AWS keys are not required if the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables are set.

+

Args:

+
+
+
buckets: list or str
+
A list of buckets, or a single bucket, from which to generate the manifest.
+
aws_access_key_id: str
+
AWS access key id to access S3 bucket
+
aws_secret_access_key: str
+
AWS secret access key to access S3 bucket
+
mandatory: boolean
+
The mandatory flag indicates whether the Redshift COPY should +terminate if the file does not exist.
+
prefix: str
+
Optional filter for key prefixes
+
manifest_bucket: str
+
Optional bucket to write manifest file.
+
manifest_key: str
+
Optional key name under which to write the manifest file in the S3 bucket.
+
+
+
+
Returns:
+
dict of manifest
+
+
+ +
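A sketch of generating a manifest and then loading it with copy_s3() (all bucket, prefix, and key names are hypothetical):

from parsons import Redshift

rs = Redshift()

# Build a manifest of matching keys across two buckets and write it to S3.
rs.generate_manifest(['bucket-a', 'bucket-b'],
                     prefix='exports/2019-',
                     manifest_bucket='bucket-a',
                     manifest_key='manifests/load.manifest')

# Load every file listed in the manifest into a single table.
rs.copy_s3('my_schema.my_table', bucket='bucket-a',
           key='manifests/load.manifest', manifest=True)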
+
+parsons.Redshift.alter_table_column_type(self, table_name, column_name, data_type, varchar_width=None)Âś
+

Alter a column type of an existing table.

+
+
table_name: str
+
The table name (ex. my_schema.my_table).
+
column_name: str
+
The target column name
+
data_type: str
+
A valid Redshift data type to alter the table to.
+
varchar_width:
+
The new width of the column if of type varchar.
+
+
+ +
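For example (table and column names hypothetical), widening a varchar column might look like:

from parsons import Redshift

rs = Redshift()

# Widen a varchar column that has started truncating data.
rs.alter_table_column_type('my_schema.my_table', 'notes',
                           'varchar', varchar_width=1024)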
+
+

Table and View APIÂś

+

Table and view utilities are a series of helper methods, all built off of commonly +used SQL queries run against the Redshift database.

+
+
+class parsons.databases.redshift.redshift.RedshiftTableUtilities[source]Âś
+
+
+table_exists(table_name, view=True)[source]Âś
+

Check if a table or view exists in the database.

+
+
Args:
+
+
table_name: str
+
The table name and schema (e.g. myschema.mytable).
+
view: boolean
+
Check to see if a view exists by the same name
+
+
+
Returns:
+
+
boolean
+
True if the table exists and False if it does not.
+
+
+
+
+ +
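A quick sketch of guarding a load on table existence (names are hypothetical, and tbl is assumed to be an existing Parsons Table):

from parsons import Redshift

rs = Redshift()

# Only load the table the first time through.
if not rs.table_exists('my_schema.my_table'):
    rs.copy(tbl, 'my_schema.my_table')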
+
+get_row_count(table_name)[source]Âś
+

Return the row count of a table.

+

SQL Code

+
SELECT COUNT(*) FROM myschema.mytable
+
+
+
+
Args:
+
+
table_name: str
+
The schema and name (e.g. myschema.mytable) of the table.
+
+
+
Returns:
+
int
+
+
+ +
+
+rename_table(table_name, new_table_name)[source]Âś
+

Rename an existing table.

+
+

Note

+

You cannot move schemas when renaming a table. Instead, utilize the duplicate_table() method.

+
+
+
Args:
+
+
table_name: str
+
Name of existing schema and table (e.g. myschema.oldtable)
+
new_table_name: str
+
New name for table with the schema omitted (e.g. newtable).
+
+
+
+
+ +
+
+move_table(source_table, new_table, drop_source_table=False)[source]Âś
+

Move an existing table in the database. It will inherit encoding, sortkey and distkey. Once run, the source table rows will be empty. This is more efficient than running "create newtable as select * from oldtable".

+

For more information see: ALTER TABLE APPEND

+
+
Args:
+
+
source_table: str
+
Name of existing schema and table (e.g. my_schema.old_table)
+
new_table: str
+
New name of schema and table (e.g. my_schema.newtable)
+
drop_source_table: boolean
+
Drop the source table.
+
+
+
Returns:
+
None
+
+
+ +
+
+populate_table_from_query(query, destination_table, if_exists='fail', distkey=None, sortkey=None)[source]Âś
+

Populate a Redshift table with the results of a SQL query, creating the table if it +doesn’t yet exist.

+
+
Args:
+
+
query: str
+
The SQL query
+
destination_table: str
+
Name of destination schema and table (e.g. my_schema.new_table)
+
if_exists: str
+
If the table already exists, either fail, append, drop, +or truncate the table.
+
distkey: str
+
The column to use as the distkey for the table.
+
sortkey: str
+
The column to use as the sortkey for the table.
+
+
+
+
+ +
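An illustrative call (schema and table names hypothetical):

from parsons import Redshift

rs = Redshift()

# Materialize a roll-up table, replacing it if it already exists.
rs.populate_table_from_query(
    'SELECT state, COUNT(*) AS voters FROM my_schema.people GROUP BY 1',
    'my_schema.voters_by_state',
    if_exists='drop',
    sortkey='state')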
+
+duplicate_table(source_table, destination_table, where_clause='', if_exists='fail', drop_source_table=False)[source]Âś
+

Create a copy of an existing table (or subset of rows) in a new +table. It will inherit encoding, sortkey and distkey.

+
+
Args:
+
+
source_table: str
+
Name of existing schema and table (e.g. myschema.oldtable)
+
destination_table: str
+
Name of destination schema and table (e.g. myschema.newtable)
+
where_clause: str
+
An optional where clause (e.g. where org = 1).
+
if_exists: str
+
If the table already exists, either fail, append, drop, +or truncate the table.
+
drop_source_table: boolean
+
Drop the source table
+
+
+
+
+ +
+
+union_tables(new_table_name, tables, union_all=True, view=False)[source]Âś
+

Union a series of tables into a new table.

+
+
Args:
+
+
new_table_name: str
+
The new table and schema (e.g. myschema.newtable)
+
tables: list
+
A list of tables to union
+
union_all: boolean
+
If False will deduplicate rows. If True will include +duplicate rows.
+
view: boolean
+
Create a view rather than a static table
+
+
+
Returns:
+
None
+
+
+ +
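For example (table names hypothetical):

from parsons import Redshift

rs = Redshift()

# Combine two monthly tables into one, keeping duplicate rows.
rs.union_tables('my_schema.events_all',
                ['my_schema.events_jan', 'my_schema.events_feb'],
                union_all=True)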
+
+get_tables(schema=None, table_name=None)[source]Âś
+

List the tables in a schema including metadata.

+
+
Args:
+
+
schema: str
+
Filter by a schema
+
table_name: str
+
Filter by a table name
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_table_stats(schema=None, table_name=None)[source]Âś
+

List table statistics, including row count and size.

+
+

Warning

+

This method is only accessible by Redshift superusers.

+
+
+
Args:
+
+
schema: str
+
Filter by a schema
+
table_name: str
+
Filter by a table name
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_columns(schema, table_name)[source]Âś
+

Gets the column names (and some other column info) for a table.

+

If you just need the column names, run get_columns_list() as it is faster.

+
for col in rs.get_columns('some_schema', 'some_table'):
+    print(col)
+
+
+
+
Args:
+
+
schema: str
+
The schema name
+
table_name: str
+
The table name
+
+
+
Returns:
+

A dict mapping column name to a dict with extra info. The keys of the dict are ordered just like the columns in the table. The extra info is a dict with format {'data_type': str, 'max_length': int or None, 'max_precision': int or None, 'max_scale': int or None, 'is_nullable': bool}.
+
+
+
+ +
+
+get_columns_list(schema, table_name)[source]Âś
+

Gets just the column names for a table.

+
+
Args:
+
+
schema: str
+
The schema name
+
table_name: str
+
The table name
+
+
+
Returns:
+
A list of column names.
+
+
+ +
+
+get_views(schema=None, view=None)[source]Âś
+

List views.

+
+
Args:
+
+
schema: str
+
Filter by a schema
+
view: str
+
Filter by a view name
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_queries()[source]Âś
+

Return the currently running and queued queries, along with resource consumption.

+
+

Warning

+

Must be a Redshift superuser to run this method.

+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_max_value(table_name, value_column)[source]Âś
+

Return the max value from a table.

+
+
Args:
+
+
table_name: str
+
Schema and table name
+
value_column: str
+
The column containing the values
+
+
+
+
+ +
+
+get_object_type(object_name)[source]Âś
+

Get object type.

+

One of view, table, index, sequence, or TOAST table.

+
+
Args:
+
+
object_name: str
+
The schema.obj for which to get the object type.
+
+
+
Returns:
+
str of the object type.
+
+
+ +
+
+is_view(object_name)[source]Âś
+

Return true if the object is a view.

+
+
Args:
+
+
object_name: str
+
The schema.obj to test if it’s a view.
+
+
+
Returns:
+
bool
+
+
+ +
+
+is_table(object_name)[source]Âś
+

Return true if the object is a table.

+
+
Args:
+
+
object_name: str
+
The schema.obj to test if it’s a table.
+
+
+
Returns:
+
bool
+
+
+ +
+
+get_table_definition(table)[source]Âś
+

Get the table definition (i.e. the create statement).

+
+
Args:
+
+
table: str
+
The schema.table for which to get the table definition.
+
+
+
Returns:
+
str
+
+
+ +
+
+get_table_definitions(schema=None, table=None)[source]Âś
+

Get the table definition (i.e. the create statement) for multiple tables.

+

This works similarly to get_table_definition() except it runs a single query to get the ddl for multiple tables. It supports SQL wildcards for schema and table. Only returns the ddl for tables (not views) that match schema and table, if they exist.

+
+
Args:
+
+
schema: str
+
The schema to filter by.
+
table: str
+
The table to filter by.
+
+
+
Returns:
+
list of dicts with matching tables.
+
+
+ +
+
+get_view_definition(view)[source]Âś
+

Get the view definition (i.e. the create statement).

+
+
Args:
+
+
view: str
+
The schema.view for which to get the view definition.
+
+
+
Returns:
+
str
+
+
+ +
+
+get_view_definitions(schema=None, view=None)[source]Âś
+

Get the view definition (i.e. the create statement) for multiple views.

+

This works similarly to get_view_definition() except it runs a single query to get the ddl for multiple views. It supports SQL wildcards for schema and view. Only returns the ddl for views (not tables) that match schema and view, if they exist.

+
+
Args:
+
+
schema: str
+
The schema to filter by.
+
view: str
+
The view to filter by.
+
+
+
Returns:
+
list of dicts with matching views.
+
+
+ +
+
+static split_full_table_name(full_table_name)[source]Âś
+

Split a full table name into its schema and table. If a schema isn't present, return public for the schema, since Redshift defaults to the public schema when one isn't provided.

+

Eg: +(schema, table) = Redshift.split_full_table_name("some_schema.some_table")

+
+
Args:
+
+
full_table_name: str
+
The table name, as “schema.table”
+
+
+
Returns:
+
+
tuple
+
A tuple containing (schema, table)
+
+
+
+
+ +
+
+static combine_schema_and_table_name(schema, table)[source]Âś
+

Creates a full table name by combining a schema and table.

+
+
Args:
+
+
schema: str
+
The schema name
+
table: str
+
The table name
+
+
+
Returns:
+
+
str
+
The combined full table name
+
+
+
+
+ +
+ +
+
+

Schema APIÂś

+

Schema utilities are a series of helper methods, all built off of commonly +used SQL queries run against the Redshift database.

+
+
+class parsons.databases.redshift.redshift.RedshiftSchema[source]Âś
+
+
+create_schema_with_permissions(schema, group=None)[source]Âś
+

Creates a Redshift schema (if it doesn’t already exist), and grants usage permissions to +a Redshift group (if specified).

+
+
Args:
+
+
schema: str
+
The schema name
+
group: str
+
The Redshift group name
+
type: str
+
The type of permissions to grant. Supports select, all, etc. (For +full list, see the +Redshift GRANT docs)
+
+
+
+
+ +
+
+grant_schema_permissions(schema, group, permissions_type='select')[source]Âś
+

Grants a Redshift group permissions to all tables within an existing schema.

+
+
Args:
+
+
schema: str
+
The schema name
+
group: str
+
The Redshift group name
+
type: str
+
The type of permissions to grant. Supports select, all, etc. (For +full list, see the +Redshift GRANT docs)
+
+
+
+
+ +
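Putting the two methods together (schema and group names hypothetical):

from parsons import Redshift

rs = Redshift()

# Create the schema if needed, then grant read access to an analyst group.
rs.create_schema_with_permissions('analytics', group='analysts')
rs.grant_schema_permissions('analytics', 'analysts', permissions_type='select')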
+ +
+
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/civis.html b/docs/html/civis.html new file mode 100644 index 0000000000..70461e8cd4 --- /dev/null +++ b/docs/html/civis.html @@ -0,0 +1,386 @@ + + + + + + + + + + + Civis — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

CivisÂś

+
+

OverviewÂś

+

The Civis Platform is a cloud-based data science platform. +This Parsons connector utilizes the Civis API Python client +to interact with the Civis Platform. It supports executing Civis SQL queries and writing Parsons Tables to a Civis +Redshift cluster.

+
+

Note

+
+
Authentication
+
The CivisClient class requires your Redshift database ID or name, and an API Key. To obtain an API Key, log in to +Civis and follow the instructions for Creating an API Key.
+
+
+
+
+

QuickstartÂś

+

To instantiate the CivisClient class, you can either store your database identifier and API Key as +environmental variables (CIVIS_DATABASE and CIVIS_API_KEY) or pass them as keyword arguments.

+
+
+

APIÂś

+
+
+class parsons.CivisClient(db=None, api_key=None, **kwargs)[source]Âś
+

Instantiate the Civis class.

+
+
Args:
+
+
db: str or int
+
The Civis Redshift database. Can be a database id or the name of the +database.
+
api_key: str
+
The Civis api key.
+
**kwargs: args
+
Option settings for the client that are described in the documentation.
+
+
+
Returns:
+
Civis class
+
+
+
+client = NoneÂś
+

The Civis API client. Utilize this attribute to access lower-level and more advanced methods which might not be surfaced in Parsons. A list of the methods can be found by reading the Civis API client documentation.

+
+ +
+
+query(sql, preview_rows=10, polling_interval=None, hidden=True, wait=True)[source]Âś
+

Execute a SQL statement as a Civis query.

+

Run a query that may return no results or where only a small +preview is required. To execute a query that returns a large number +of rows, see read_civis_sql().

+
+
Args
+
+
sql: str
+
The SQL statement to execute.
+
preview_rows: int, optional
+
The maximum number of rows to return. No more than 100 rows can be +returned at once.
+
polling_interval: int or float, optional
+
Number of seconds to wait between checks for query completion.
+
hidden: bool, optional
+
If True (the default), this job will not appear in the Civis UI.
+
wait: boolean
+
If True, will wait for query to finish executing before exiting +the method.
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
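A minimal sketch (the database name and SQL here are hypothetical):

from parsons import CivisClient

civis = CivisClient(db='Redshift', api_key='MY_API_KEY')

# Returns a small preview of the result as a Parsons Table.
preview = civis.query('SELECT COUNT(*) AS n FROM scratch.people')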
+
+table_import(table_obj, table, max_errors=None, existing_table_rows='fail', diststyle=None, distkey=None, sortkey1=None, sortkey2=None, wait=True, **civisargs)[source]Âś
+

Write the table to a Civis Redshift cluster. Additional keyword arguments can be passed to civis.io.dataframe_to_civis().

+
+
Args
+
+
table_obj: obj
+
A Parsons Table object
+
table: str
+
The schema and table you want to upload to, e.g. 'scratch.table'. Schemas or table names with periods must be double quoted, e.g. 'scratch."my.table"'.
+
api_key: str
+
Your Civis API key. If not given, the CIVIS_API_KEY environment variable will be +used.
+
max_errors: int
+
The maximum number of rows with errors to remove from the import before failing.
+
existing_table_rows: str
+
The behaviour if a table with the requested name already exists. One of +‘fail’, ‘truncate’, ‘append’ or ‘drop’. Defaults to ‘fail’.
+
diststyle: str
+
The distribution style for the table. One of ‘even’, ‘all’ or ‘key’.
+
distkey: str
+
The column to use as the distkey for the table.
+
sortkey1: str
+
The column to use as the sortkey for the table.
+
sortkey2: str
+
The second column in a compound sortkey for the table.
+
wait: boolean
+
Wait for write job to complete before exiting method.
+
+
+
Returns
+
None
+
+
+ +
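For illustration (the target table name is hypothetical):

from parsons import CivisClient, Table

civis = CivisClient(db='Redshift', api_key='MY_API_KEY')
tbl = Table([{'id': 1, 'name': 'Jane'}])

# Append to the existing table rather than failing if it already exists.
civis.table_import(tbl, 'scratch.people', existing_table_rows='append')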
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/facebook_ads.html b/docs/html/facebook_ads.html new file mode 100644 index 0000000000..c0ffb965e3 --- /dev/null +++ b/docs/html/facebook_ads.html @@ -0,0 +1,485 @@ + + + + + + + + + + + FacebookAds — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

FacebookAdsÂś

+
+

OverviewÂś

+

The FacebookAds class allows you to interact with parts of the Facebook Business API. +Currently the connector provides methods for creating and deleting custom audiences, and for adding users to audiences.

+

The FacebookAds connector is a thin wrapper around the FB Business SDK, +so some of that SDK is exposed, e.g., you may see exceptions like FacebookRequestError.

+

Facebook’s advertising and Pages systems are massive. Check out the overviews for more information:

+ +
+

Note

+
+
Authentication
+
Before using FacebookAds, you'll need the following:
* A FB application, specifically the app ID and secret. See https://developers.facebook.com to find your app details or create a new app. Note that a Facebook app isn't necessarily visible to anyone but you: it's just needed to interact with the Facebook API.
* A FB ad account. See https://business.facebook.com to find your ad accounts or create a new one.
* A FB access token representing a user that has access to the relevant ad account. You can generate an access token from your app, either via the Facebook API itself, or via the console at https://developers.facebook.com.
+
+
+
+
+

QuickstartÂś

+

To instantiate the FacebookAds class, you can either store your authentication credentials as environmental variables +(FB_APP_ID, FB_APP_SECRET, FB_ACCESS_TOKEN, and FB_AD_ACCOUNT_ID) or pass them in as arguments:

+
from parsons import FacebookAds
+
+# First approach: Use environmental variables
+fb = FacebookAds()
+
+# Second approach: Pass credentials as argument
+fb = FacebookAds(app_id='my_app_id',
+                 app_secret='my_app_secret',
+                 access_token='my_access_token',
+                 ad_account_id='my_account_id')
+
+
+

You can then use various methods:

+
# Create audience
+fb.create_custom_audience(name='audience_name', data_source='USER_PROVIDED_ONLY')
+
+# Delete audience
+fb.delete_custom_audience(audience_id='123')
+
+
+
+
+

APIÂś

+
+
+class parsons.FacebookAds(app_id=None, app_secret=None, access_token=None, ad_account_id=None)[source]Âś
+

Instantiate the FacebookAds class

+
+
Args:
+
+
app_id: str
+
A Facebook app ID. Required if env var FB_APP_ID is not populated.
+
app_secret: str
+
A Facebook app secret. Required if env var FB_APP_SECRET is not populated.
+
access_token: str
+
A Facebook access token. Required if env var FB_ACCESS_TOKEN is not populated.
+
ad_account_id: str
+
A Facebook ad account ID. Required if env var FB_AD_ACCOUNT_ID is not populated.
+
+
+
+
+
+static get_match_table_for_users_table(users_table)[source]Âś
+

Prepares an input table for matching into a FB custom audience by identifying which columns are supported for matching, renaming those columns to what FB expects, and cutting away the other columns.

+

See FacebookAds.create_custom_audience for more details.

+
+
Args:
+
+
users_table: Table
+
The source table for matching
+
+
+
Returns:
+
+
Table
+
The prepared table
+
+
+
+
+ +
+
+create_custom_audience(name, data_source, description=None)[source]Âś
+

Creates a FB custom audience.

+
+
Args:
+
+
name: str
+
The name of the custom audience
+
data_source: str
+
One of USER_PROVIDED_ONLY, PARTNER_PROVIDED_ONLY, or +BOTH_USER_AND_PARTNER_PROVIDED. +This tells FB whether the data for a custom audience was provided by actual users, +or acquired via partners. FB requires you to specify.
+
description: str
+
Optional. The description of the custom audience
+
+
+
Returns:
+
ID of the created audience
+
+
+ +
+
+delete_custom_audience(audience_id)[source]Âś
+

Deletes a FB custom audience.

+
+
Args:
+
+
audience_id: str
+
The ID of the custom audience to delete.
+
+
+
+
+ +
+
+add_users_to_custom_audience(audience_id, users_table)[source]Âś
+

Adds user data to a custom audience.

+

Each user row in the provided table should have at least one of the supported columns +defined. Otherwise the row will be ignored. Beyond that, the rows may have any other +non-supported columns filled out, and those will all be ignored.

Supported columns (Column Type: valid column names):

Email Address: email, email address, voterbase_email
First Name: fn, first, first name, vb_tsmart_first_name
Last Name: ln, last, last name, vb_tsmart_last_name
Phone Number: phone, phone number, cell, landline, vb_voterbase_phone, vb_voterbase_phone_wireless
City: ct, city, vb_vf_reg_city, vb_tsmart_city
State: st, state, state code, vb_vf_source_state, vb_tsmart_state, vb_vf_reg_state, vb_vf_reg_cass_state
Zip Code: zip, zip code, vb_vf_reg_zip, vb_tsmart_zip
Country: country, country code
Gender: gen, gender, sex, vb_vf_reg_zip
Birth Year: doby, dob year, birth year
Birth Month: dobm, dob month, birth month
Birth Day: dobd, dob day, birth day
Date of Birth: dob, vb_voterbase_dob, vb_tsmart_dob (Format: YYYYMMDD)

The column names will be normalized before comparing to this list - eg. removing +whitespace and punctuation - so you don’t need to match exactly.

+

If more than one of your columns map to a single FB key, then for each row we’ll use any +non-null value for those columns. +Eg. If you have both vb_voterbase_phone and vb_voterbase_phone_wireless (which +both map to the FB “phone” key), then for each person in your table, we’ll try to pick one +valid phone number.

+

For details of the expected data formats for each column type, see +Facebook Audience API, +under “Hashing and Normalization for Multi-Key”.

+

Note that you shouldn’t have to do normalization on your data, as long as it’s +reasonably close to what FB expects. Eg. It will convert “Male” to “m”, and ” JoSH” +to “josh”.

+

FB will attempt to match the data to users in their system. You won’t be able to find out +which users were matched. But if you provide enough data, FB will tell you roughly how many +of them were matched. (You can find the custom audience in your business account at +https://business.facebook.com).

+

Note that because FB’s matching is so opaque, it will hide lots of data issues. Eg. if you +use “United States” instead of “US” for the “country” field, the API will appear to accept +it, when in reality it is probably ignoring that field. So read the docs if you’re worried.

+
+
Args:
+
+
audience_id: str
+
The ID of the custom audience to add users to.
+
users_table: obj
+
Parsons table
+
+
+
+
+ +
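A short sketch using the supported column names from the table above (the audience ID and user data are hypothetical):

from parsons import FacebookAds, Table

fb = FacebookAds()

users = Table([
    {'email': 'jane@example.com', 'first': 'Jane', 'last': 'Doe'},
    {'email': 'bob@example.com', 'phone': '15555550100'},
])

fb.add_users_to_custom_audience(audience_id='123', users_table=users)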
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/genindex.html b/docs/html/genindex.html new file mode 100644 index 0000000000..ab07cbf7d6 --- /dev/null +++ b/docs/html/genindex.html @@ -0,0 +1,1509 @@ + + + + + + + + + + + + Index — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/google.html b/docs/html/google.html new file mode 100644 index 0000000000..e391f6f57e --- /dev/null +++ b/docs/html/google.html @@ -0,0 +1,1226 @@ + + + + + + + + + + + Google — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

GoogleÂś

+

Google Cloud services allow you to upload and manipulate Tables as spreadsheets (via GoogleSheets) or query them as SQL database tables (via GoogleBigQuery). You can also upload/store/download them as binary objects (via GoogleCloudStorage). Finally, Google offers an API for civic information using GoogleCivic.

+

For all of these services you will need to enable the APIs for your Google Cloud account and obtain authentication tokens to access them from your scripts. If you are the administrator of your Google Cloud account, you can do both of these at Google Cloud Console APIs and Services.

+
+

BigQueryÂś

+
+

OverviewÂś

+

Google BigQuery is a cloud data warehouse solution. Data is stored in tables, and users can query using SQL. +BigQuery uses datasets as top level containers for tables, and datasets are themselves contained within +Google Cloud projects.

+
+
+

QuickstartÂś

+

To instantiate the GoogleBigQuery class, you can pass the constructor a string containing either the name of the Google service account credentials file or a JSON string encoding those credentials. Alternatively, you can set the environment variable GOOGLE_APPLICATION_CREDENTIALS to be either of those strings and call the constructor without that argument.

+
from parsons import GoogleBigQuery
+
+# Set as environment variable so we don't have to pass it in. May either
+# be the file name or a JSON encoding of the credentials.
+os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'google_credentials_file.json'
+
+big_query = GoogleBigQuery()
+
+
+

Alternatively, you can pass the credentials in as an argument. In the example below, we also specify the project.

+
# Project in which we're working
+project = 'parsons-test'
+big_query = GoogleBigQuery(app_creds='google_credentials_file.json',
+                           project=project)
+
+
+

We can now upload/query data.

+
dataset = 'parsons_dataset'
+table = 'parsons_table'
+
+# Table name should be project.dataset.table, or dataset.table, if
+# working with the default project
+table_name = project + '.' + dataset + '.' + table
+
+# Must be pre-existing bucket. Create via GoogleCloudStorage() or
+# at https://console.cloud.google.com/storage/create-bucket. May be
+# omitted if the name of the bucket is specified in environment
+# variable GCS_TEMP_BUCKET.
+gcs_temp_bucket = 'parsons_bucket'
+
+# Create dataset if it doesn't already exist
+big_query.client.create_dataset(dataset=dataset, exists_ok=True)
+
+parsons_table = Table([{'name':'Bob', 'party':'D'},
+                       {'name':'Jane', 'party':'D'},
+                       {'name':'Sue', 'party':'R'},
+                       {'name':'Bill', 'party':'I'}])
+
+# Copy table in to create new BigQuery table
+big_query.copy(table_obj=parsons_table,
+               table_name=table_name,
+               tmp_gcs_bucket=gcs_temp_bucket)
+
+# Select from project.dataset.table
+big_query.query(f'select name from {table_name} where party = "D"')
+
+# Delete the table when we're done
+big_query.client.delete_table(table=table_name)
+
+
+
+
+

APIÂś

+
+
+class parsons.google.google_bigquery.GoogleBigQuery(app_creds=None, project=None, location=None)[source]Âś
+

Class for querying BigQuery tables and returning the data as Parsons tables.

+

This class requires application credentials in the form of a json. It can be passed +in the following ways:

+
    +
• Set an environmental variable named GOOGLE_APPLICATION_CREDENTIALS with the local path to the credentials json. Example: GOOGLE_APPLICATION_CREDENTIALS='path/to/creds.json'
• Pass in the path to the credentials using the app_creds argument.
• Pass in a json string using the app_creds argument.
+
Args:
+
+
app_creds: str
+
A credentials json string or a path to a json file. Not required +if GOOGLE_APPLICATION_CREDENTIALS env variable set.
+
project: str
+
The project which the client is acting on behalf of. If not passed +then will use the default inferred environment.
+
location: str
+
Default geographic location for tables
+
+
+
+
+
+copy(table_obj, table_name, if_exists='fail', tmp_gcs_bucket=None, gcs_client=None, job_config=None, **load_kwargs)[source]Âś
+

Copy a Parsons Table into Google BigQuery via Google Cloud Storage.

+
+
Args:
+
+
table_obj: obj
+
The Parsons Table to copy into BigQuery.
+
table_name: str
+
The table name to load the data into.
+
if_exists: str
+
If the table already exists, either fail, append, drop +or truncate the table.
+
tmp_gcs_bucket: str
+
The name of the Google Cloud Storage bucket to use to stage the data to load +into BigQuery. Required if GCS_TEMP_BUCKET is not specified.
+
gcs_client: object
+
The GoogleCloudStorage Connector to use for loading data into Google Cloud Storage.
+
job_config: object
+
A LoadJobConfig object to provide to the underlying call to load_table_from_uri +on the BigQuery client. The function will create its own if not provided.
+
**load_kwargs: kwargs
+
Arguments to pass to the underlying load_table_from_uri call on the BigQuery +client.
+
+
+
+
+ +
+
+delete_table(table_name)[source]Âś
+

Delete a BigQuery table.

+
+
Args:
+
+
table_name: str
+
The name of the table to delete.
+
+
+
+
+ +
+
+query(sql, parameters=None)[source]Âś
+

Run a BigQuery query and return the results as a Parsons table.

+

To include python variables in your query, it is recommended to pass them as parameters, following the BigQuery style where parameters are prefixed with @. Using the parameters argument ensures that values are escaped properly, and avoids SQL injection attacks.

+

Parameter Examples

+
name = "Beatrice O'Brady"
+sql = 'SELECT * FROM my_table WHERE name = %s'
+rs.query(sql, parameters=[name])
+
+
+
name = "Beatrice O'Brady"
+sql = "SELECT * FROM my_table WHERE name = %(name)s"
+rs.query(sql, parameters={'name': name})
+
+
+
+
Args:
+
+
sql: str
+
A valid BigQuery statement
+
parameters: dict
+
A dictionary of query parameters for BigQuery.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+table_exists(table_name)[source]Âś
+

Check whether or not the Google BigQuery table exists in the specified dataset.

+
+
Args:
+
+
table_name: str
+
The name of the BigQuery table to check for
+
+
+
Returns:
+
+
bool
+
True if the table exists in the specified dataset, false otherwise
+
+
+
+
+ +
+
+clientÂś
+

Get the Google BigQuery client to use for making queries.

+
+
Returns:
+
google.cloud.bigquery.client.Client
+
+
+ +
+ +
+

+
+
+

Cloud StorageÂś

+
+

OverviewÂś

+

Google Cloud Storage is a cloud file storage system. It uses buckets in which to +store arbitrary files referred to as blobs. You may use this connector to upload Parsons tables as blobs, download them to files, and list available blobs.

+

To use the GoogleCloudStorage class, you will need Google service account credentials. If you are the administrator of your Google Cloud account, you can generate them in the Google Cloud Console APIs and Services.

+
+
+

QuickstartÂś

+

To instantiate the GoogleCloudStorage class, you can pass the constructor a string containing either the name of your Google service account credentials file or a JSON string encoding those credentials. Alternatively, you can set the environment variable GOOGLE_APPLICATION_CREDENTIALS to be either of those strings and call the constructor without that argument.

+
from parsons import GoogleCloudStorage
+
+# Set as environment variable so we don't have to pass it in. May either
+# be the file name or a JSON encoding of the credentials.
+os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'google_credentials_file.json'
+
+gcs = GoogleCloudStorage()
+
+
+

Alternatively, you can pass the credentials in as an argument. In the example below, we also specify the project.

+
credentials_filename = 'google_credentials_file.json'
+project = 'parsons-test'    # Project in which we're working
+gcs = GoogleCloudStorage(app_creds=credentials_filename, project=project)
+
+
+

Now we can create buckets, upload blobs to them, and list/retrieve the available blobs.

+
gcs.create_bucket('parsons_bucket')
+gcs.list_buckets()
+
+gcs.upload_table(bucket='parsons_bucket', table=parsons_table, blob_name='parsons_blob')
+gcs.get_blob(bucket_name='parsons_bucket', blob_name='parsons_blob')
+
+
+
+
+

APIÂś

+
+
+class parsons.google.google_cloud_storage.GoogleCloudStorage(app_creds=None, project=None)[source]Âś
+

This class requires application credentials in the form of a json. It can be passed +in the following ways:

+
    +
• Set an environmental variable named GOOGLE_APPLICATION_CREDENTIALS with the local path to the credentials json. Example: GOOGLE_APPLICATION_CREDENTIALS='path/to/creds.json'
• Pass in the path to the credentials using the app_creds argument.
• Pass in a json string using the app_creds argument.
+
+
Args:
+
+
app_creds: str
+
A credentials json string or a path to a json file. Not required +if GOOGLE_APPLICATION_CREDENTIALS env variable set.
+
project: str
+
The project which the client is acting on behalf of. If not passed +then will use the default inferred environment.
+
+
+
Returns:
+
GoogleCloudStorage Class
+
+
+
+client = NoneÂś
+

Access all methods of the google.cloud package

+
+ +
+
+list_buckets()[source]Âś
+

Returns a list of buckets

+
+
Returns:
+
List of buckets
+
+
+ +
+
+bucket_exists(bucket_name)[source]Âś
+

Verify that a bucket exists

+
+
Args:
+
+
bucket_name: str
+
The name of the bucket
+
+
+
Returns:
+
boolean
+
+
+ +
+
+get_bucket(bucket_name)[source]Âś
+

Returns a bucket object

+
+
Args:
+
+
bucket_name: str
+
The name of bucket
+
+
+
Returns:
+
GoogleCloud Storage bucket
+
+
+ +
+
+create_bucket(bucket_name)[source]Âś
+

Create a bucket.

+
+
Args:
+
+
bucket_name: str
+
A globally unique name for the bucket.
+
+
+
Returns:
+
None
+
+
+ +
+
+delete_bucket(bucket_name, delete_blobs=False)[source]Âś
+

Delete a bucket. Will fail if not empty unless delete_blobs argument +is set to True.

+
+
Args:
+
+
bucket_name: str
+
The name of the bucket
+
delete_blobs: boolean
+
Delete blobs in the bucket, if it is not empty
+
+
+
Returns:
+
None
+
+
+ +
+
+list_blobs(bucket_name, max_results=None, prefix=None)[source]Âś
+

List all of the blobs in a bucket

+
+
Args:
+
+
bucket_name: str
+
The name of the bucket
+
max_results: int
+
The maximum number of blobs to return
+
prefix: str
+
A prefix to filter files
+
+
+
Returns:
+
A list of blob names
+
+
+ +
+
+blob_exists(bucket_name, blob_name)[source]Âś
+

Verify that a blob exists in the specified bucket

+
+
Args:
+
+
bucket_name: str
+
The bucket name
+
blob_name: str
+
The name of the blob
+
+
+
Returns:
+
boolean
+
+
+ +
+
+get_blob(bucket_name, blob_name)[source]Âś
+

Get a blob object

+
+
Args:
+
+
bucket_name: str
+
A bucket name
+
blob_name: str
+
A blob name
+
+
+
Returns:
+
A Google Storage blob object
+
+
+ +
+
+put_blob(bucket_name, blob_name, local_path)[source]Âś
+

Puts a blob (aka file) in a bucket

+
+
Args:
+
+
blob_name:
+
The name of blob to be stored in the bucket
+
bucket_name:
+
The name of the bucket to store the blob
+
local_path: str
+
The local path of the file to upload
+
+
+
Returns:
+
None
+
+
+ +
+
+download_blob(bucket_name, blob_name, local_path=None)[source]Âś
+

Gets a blob from a bucket

+
+
Args:
+
+
bucket_name: str
+
The name of the bucket
+
blob_name: str
+
The name of the blob
+
local_path: str
+
The local path where the file will be downloaded. If not specified, a temporary +file will be created and returned, and that file will be removed automatically +when the script is done running.
+
+
+
Returns:
+
+
str
+
The path of the downloaded file
+
+
+
+
+ +
+
+delete_blob(bucket_name, blob_name)[source]Âś
+

Delete a blob

+
+
Args:
+
+
bucket_name: str
+
The bucket name
+
blob_name: str
+
The blob name
+
+
+
Returns:
+
None
+
+
+ +
+
+upload_table(table, bucket_name, blob_name, data_type='csv', default_acl=None)[source]Âś
+

Load the data from a Parsons table into a blob.

+
+
Args:
+
+
table: obj
+
A Parsons Table
+
bucket_name: str
+
The name of the bucket to upload the data into.
+
blob_name: str
+
The name of the blob to upload the data into.
+
data_type: str
+
The file format to use when writing the data. One of: csv or json
+
+
+
+
+ +
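A round-trip sketch reusing the bucket and blob names from the Quickstart:

from parsons import GoogleCloudStorage, Table

gcs = GoogleCloudStorage()
tbl = Table([{'name': 'Bob', 'party': 'D'}])

# Upload the table as a CSV blob, then download it to a temp file.
gcs.upload_table(tbl, bucket_name='parsons_bucket', blob_name='parsons_blob')
local_path = gcs.download_blob('parsons_bucket', 'parsons_blob')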
+ +
+

+
+
+

CivicÂś

+
+

OverviewÂś

+

Google Civic is an API which provides helpful information about elections. In order to access Google Civic you must create a Google Developer Key in their API console. In order to use Google Civic, you must enable this specific endpoint.

+

The Google Civic API utilizes the Voting Information Project to collect +key civic information such as personalized ballots and polling location information.

+
+
+

QuickstartÂś

+

To instantiate the GoogleCivic class, you can pass the constructor a string containing the Google Civic API key you’ve generated for your project, or set the environment variable GOOGLE_CIVIC_API_KEY to that value.

+
from parsons import GoogleCivic
+
+# Set as environment variable so we don't have to pass it in. May either
+# be the file name or a JSON encoding of the credentials.
+os.environ['GOOGLE_CIVIC_API_KEY'] = 'AIzaSyAOVZVeL-snv3vNDUdw6QSiCvZRXk1xM'
+
+google_civic = GoogleCivic()
+
+
+

Alternatively, you can pass the API key in as an argument:

+
google_civic = GoogleCivic(api_key='AIzaSyAOVZVeL-snv3vNDUdw6QSiCvZRXk1xM')
+
+
+

Now you can retrieve election information

+
elections = google_civic.get_elections()
+
+address = '1600 Pennsylvania Avenue, Washington DC'
+election_id = '7000'  # General Election
+google_civic.get_polling_location(election_id=election_id, address=address)
+
+
+
+
+

APIÂś

+
+
+class parsons.google.google_civic.GoogleCivic(api_key=None)[source]Âś
+
+
Args:
+
+
api_key : str
+
A valid Google api key. Not required if GOOGLE_CIVIC_API_KEY +env variable set.
+
+
+
Returns:
+
class
+
+
+
+get_elections()[source]Âś
+

Get a collection of information about elections and voter information.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_polling_location(election_id, address)[source]Âś
+

Get polling location information for a given address.

+
+
Args:
+
+
election_id: int
+
A valid election id. Election ids can be found by running the +get_elections() method.
+
address: str
+
A valid US address in a single string.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_polling_locations(election_id, table, address_field='address')[source]Âś
+

Get polling location information for a table of addresses.

+
+
Args:
+
+
election_id: int
+
A valid election id. Election ids can be found by running the +get_elections() method.
+
table: obj
+
A Parsons Table containing the addresses to look up.
+
address_field: str
+
The name of the column where the address is stored.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
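A sketch of looking up polling locations for a table of addresses (the election ID mirrors the Quickstart example above):

from parsons import GoogleCivic, Table

google_civic = GoogleCivic(api_key='MY_API_KEY')

addresses = Table([{'address': '1600 Pennsylvania Avenue, Washington DC'}])
locations = google_civic.get_polling_locations(election_id='7000', table=addresses)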
+ +
+

+
+
+

Google SheetsÂś

+
+

OverviewÂś

+

The GoogleSheets class allows you to interact with Google service account spreadsheets, called “Google Sheets.” You can create, modify, read, format, share and delete sheets with this connector.

+

In order to instantiate the class, you must pass Google service account credentials as a dictionary, or store the credentials as a JSON string in the GOOGLE_DRIVE_CREDENTIALS environment variable. Typically you’ll get the credentials from the Google Developer Console (look for the “Google Drive API”).

+
+
+

QuickstartÂś

+

To instantiate the GoogleSheets class, you can either pass the constructor a dict containing your Google service account credentials or define the environment variable GOOGLE_DRIVE_CREDENTIALS to contain a JSON encoding of the dict.

+
from parsons import GoogleSheets
+
+# First approach: Use API credentials via environmental variables
+sheets = GoogleSheets()
+
+# Second approach: Pass API credentials as argument
+credential_filename = 'google_drive_service_credentials.json'
+credentials = json.load(open(credential_filename))
+sheets = GoogleSheets(google_keyfile_dict=credentials)
+
+
+

You can then create/modify/retrieve documents using instance methods:

+
sheet_id = sheets.create_spreadsheet('Voter Cell Phones')
+sheets.append_to_sheet(sheet_id, people_with_cell_phones)
+parsons_table = sheets.get_worksheet(sheet_id)
+
+
+
+
+

APIÂś

+
+
+class parsons.google.google_sheets.GoogleSheets(google_keyfile_dict=None)[source]Âś
+

A connector for Google Sheets, handling data import and export.

+
+
Args:
+
+
google_keyfile_dict: dict
+
A dictionary of Google Drive API credentials, parsed from JSON provided +by the Google Developer Console. Required if env variable +GOOGLE_DRIVE_CREDENTIALS is not populated.
+
+
+
+
+
+list_worksheets(spreadsheet_id)[source]Âś
+

Return a list of worksheets in the spreadsheet.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
+
+
Returns:
+
+
list
+
A list of worksheets, ordered by their index
+
+
+
+
+ +
+
+get_worksheet_index(spreadsheet_id, title)[source]Âś
+

Get the index of the first worksheet in a Google spreadsheet with the given title. The title is case sensitive, and the index begins with 0.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
title: str
+
The sheet title
+
+
+
Returns:
+
+
str
+
The sheet index
+
+
+
+
+ +
+
+get_worksheet(spreadsheet_id, worksheet=0)[source]Âś
+

Create a parsons table from a sheet in a Google spreadsheet, given the sheet index.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
worksheet: str or int
+
The index or the title of the worksheet. The index begins with +0.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+share_spreadsheet(spreadsheet_id, sharee, share_type='user', role='reader', notify=True, notify_message=None, with_link=False)[source]Âś
+

Share a spreadsheet with a user, group of users, domain and/or the public.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
sharee: str
+
User or group e-mail address, domain name to share the spreadsheet +with. To share publicly, set sharee value to None.
+
share_type: str
+
The sharee type. Allowed values are: user, group, domain, +anyone.
+
role: str
+
The primary role for this user. Allowed values are: owner, +writer, reader.
+
notify: boolean
+
Whether to send an email to the target user/domain.
+
notify_message: str
+
The email to be sent if notify kwarg set to True.
+
with_link: boolean
+
Whether a link is required for this permission.
+
+
+
+
+ +
+
+get_spreadsheet_permissions(spreadsheet_id)[source]Âś
+

List the permissioned users and groups for a spreadsheet.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+create_spreadsheet(title, editor_email=None)[source]Âś
+

Create a Google spreadsheet from a Parsons table. Optionally shares the new doc with +the given email address.

+
+
Args:
+
+
title: str
+
The human-readable title of the new spreadsheet
+
editor_email: str (optional)
+
Email address which should be given permissions on this spreadsheet
+
+
+
Returns:
+
+
str
+
The spreadsheet ID
+
+
+
+
+ +
+
+delete_spreadsheet(spreadsheet_id)[source]Âś
+

Deletes a Google spreadsheet.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
+
+
+
+ +
+
+add_sheet(spreadsheet_id, title=None, rows=100, cols=25)[source]Âś
+

Adds a sheet to a Google spreadsheet.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
rows: int
+
Number of rows
+
cols: int
+
Number of columns
+
+
+
Returns:
+
+
str
+
The sheet index
+
+
+
+
+ +
+
+append_to_sheet(spreadsheet_id, table, worksheet=0, user_entered_value=False, **kwargs)[source]Âś
+

Append data from a Parsons table to a Google sheet. Note that the table’s columns are +ignored, as we’ll be keeping whatever header row already exists in the Google sheet.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
table: obj
+
Parsons table
+
worksheet: str or int
+
The index or the title of the worksheet. The index begins with +0.
+
user_entered_value: bool (optional)
+
If True, will submit cell values as entered (required for entering formulas). +Otherwise, values will be entered as strings or numbers only.
+
+
+
+
+ +
+
+overwrite_sheet(spreadsheet_id, table, worksheet=0, user_entered_value=False, **kwargs)[source]Âś
+

Replace the data in a Google sheet with a Parsons table, using the table’s columns as the +first row.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
table: obj
+
Parsons table
+
worksheet: str or int
+
The index or the title of the worksheet. The index begins with +0.
+
user_entered_value: bool (optional)
+
If True, will submit cell values as entered (required for entering formulas). +Otherwise, values will be entered as strings or numbers only.
+
+
+
+
+ +
+
+format_cells(spreadsheet_id, range, cell_format, worksheet=0)[source]Âś
+

Format the cells of a worksheet.

+
+
Args:
+
+
spreadsheet_id: str
+
The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
+
range: str
+
The cell range to format. E.g. "A2" or "A2:B100"
+
cell_format: dict
+
The formatting to apply to the range. Full options are specified in +the GoogleSheets API documentation.
+
worksheet: str or int
+
The index or the title of the worksheet. The index begins with +0.
+
+
+
+

Examples

+
# Set 'A4' cell's text format to bold
+gs.format_cells(sheet_id, "A4", {"textFormat": {"bold": True}}, worksheet=0)
+
+# Color the background of 'A2:B2' cell range black,
+# change horizontal alignment, text color and font size
+gs.format_cells(sheet_id, "A2:B2", {
+    "backgroundColor": {
+        "red": 0.0,
+        "green": 0.0,
+        "blue": 0.0
+        },
+    "horizontalAlignment": "CENTER",
+    "textFormat": {
+        "foregroundColor": {
+            "red": 1.0,
+            "green": 1.0,
+            "blue": 0.0
+            },
+            "fontSize": 12,
+            "bold": True
+            }
+        }, worksheet=0)
+
+
+
+ +
+ +
+
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/index.html b/docs/html/index.html new file mode 100644 index 0000000000..38ec3ca1f2 --- /dev/null +++ b/docs/html/index.html @@ -0,0 +1,452 @@ + + + + + + + + + + + About — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

AboutÂś

+

Parsons, named after Lucy Parsons, is a Python package that contains a growing list of connectors and integrations to move data between various tools. Parsons is focused on integrations and connectors for tools utilized by the progressive community.

+

Parsons was built out of a belief that progressive organizations spend far too much time building the same integrations, over and over and over again, while they should be engaged in more important and impactful work. It +was built and is maintained by The Movement Cooperative.

+
+
+

The Movement CooperativeÂś

+

The Movement Cooperative is a member led organization focused on providing data, tools and strategic support for the progressive community. Our mission is to break down technological barriers for organizations that fight for social justice.

+
+
+

License and UsageÂś

+

Usage of Parsons is governed by the TMC Parsons License, which allows for unlimited non-commercial usage, provided that individuals and organizations adhere to our broad values statement.

+
+
+

Design GoalsÂś

+

The goal of Parsons is to make the movement of data between systems as easy and straightforward as possible. Simply put, we seek to reduce the lines of code that are written by the progressive community. Not only is this a waste of time, but we rarely have the capacity and resources to fully unittest our scripts.

+_images/parsons_diagram.png +

Parsons seeks to be flexible from a data ingestion and output perspective, while providing ETL tools that recognize that our data is always messy. Central to this concept is the Parsons Table, the table-like object that most methods return.

+
+
+

QuickStartÂś

+
# VAN - Download activist codes to a CSV
+
+from parsons import VAN
+van = VAN(db='MyVoters')
+ac = van.get_activist_codes()
+ac.to_csv('my_activist_codes.csv')
+
+# Redshift - Create a table from a CSV
+
+from parsons import Table
+tbl = Table.from_csv('my_table.csv')
+tbl.to_redshift('my_schema.my_table')
+
+# Redshift - Export from a query to CSV
+
+from parsons import Redshift
+sql = 'select * from my_schema.my_table'
+rs = Redshift()
+tbl = rs.query(sql)
+tbl.to_csv('my_table.csv')
+
+# Upload a file to S3
+
+from parsons import S3
+s3 = S3()
+s3.put_file('my_bucket','my_table.csv')
+
+# TargetSmart - Append data to a record
+
+from parsons import TargetSmart
+ts = TargetSmart(api_key='MY_KEY')
+record = ts.data_enhance(231231231, state='DC')
+
+
+
+ +
+

InstallationÂś

+

You can install the most recent release by running: pip install parsons

+
+
+

LoggingÂś

+

Parsons uses the native python logging system. By default, log output will go to the console and look like:

+
parsons.modulename LOGLEVEL the specific log message
+
+
+

In your scripts that use Parsons, if you want to override the default Parsons logging behavior, just grab the “parsons” logger and tweak it:

+
import logging
+parsons_logger = logging.getLogger('parsons')
+# parsons_logger.setLevel('DEBUG')
+# parsons_logger.addHandler(...)
+# parsons_logger.setFormatter(...)
+
+
+
+
+

Minimizing Resource Utilization

+

A primary goal of Parsons is to make installing and using the library as easy as possible. Many of the patterns and examples that we document are meant to show how easy it can be to use Parsons, but sometimes these patterns trade accessibility for performance.

+

In environments where efficiency is important, we recommend users take the following steps to minimize resource utilization:

1. Don’t import classes from the root Parsons package
2. Install only the dependencies you need
+

Don’t import from the root Parsons package

+

Throughout the Parsons documentation, users are encouraged to load Parsons classes like so:

+

from parsons import Table

+

In order to support this pattern, Parsons imports all of its classes into the root parsons package. Due to how Python loads modules and packages, importing even one Parsons class results in ALL of them being loaded. In order to avoid the resource consumption associated with loading all of Parsons, we have created a mechanism to skip loading of all of the Parsons classes.

+

If you set PARSONS_SKIP_IMPORT_ALL in your environment, Parsons will not import all of its classes into the root parsons package. Setting this environment variable means you will NOT be able to import using the from parsons import X pattern. Instead, you will need to import directly from the package where a class is defined (e.g. from parsons.etl import Table).

+

If you use PARSONS_SKIP_IMPORT_ALL and import directly from the appropriate sub-package, you will only load the classes that you need and will not consume extra resources. Using this method, you may see as much as an 8x decrease in memory usage for Parsons.

+
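A minimal sketch of this pattern (the parsons.etl location for Table is taken from the example above):

import os

# Must be set before parsons is imported for the first time.
os.environ['PARSONS_SKIP_IMPORT_ALL'] = '1'

from parsons.etl import Table  # direct sub-package import still works

tbl = Table([{'id': 1, 'name': 'Lucy'}])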

Install only the dependencies you need

+

Since Parsons needs to talk to so many different APIs, it has a number of dependencies on other Python libraries. It may be preferable to only install those external dependencies that you will use.

+

For example, if you are running on Google Cloud, you might not need to use any of Parsons’ AWS connectors. If you don’t use any of Parsons’ AWS connectors, then you won’t need to install the Amazon Boto3 library that Parsons uses to access the Amazon APIs.

+

By default, installing Parsons will install all of its external dependencies. You can prevent these dependencies from being installed with Parsons by passing the --no-deps flag to pip when you install Parsons.

+

> pip install --no-deps parsons

+

Once you have Parsons installed without these external dependencies, you can then install the libraries as you need them. You can use the requirements.txt as a reference to figure out which version you need. At a minimum you will need to install the following libraries for Parsons to work at all:

+
  • petl
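Putting the two steps together, a minimal install might look like this (connector-specific libraries are then added only as you need them):

> pip install --no-deps parsons
> pip install petl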
+
+© Copyright 2019, The Movement Cooperative
+
+Built with Sphinx using a theme provided by Read the Docs.
\ No newline at end of file
diff --git a/docs/html/logging.html b/docs/html/logging.html
new file mode 100644
index 0000000000..79c16978ca
--- /dev/null
+++ b/docs/html/logging.html
@@ -0,0 +1,483 @@
+Logger — Parsons 0.1 documentation

Logger

+

The Logger class is a wrapper for python’s logging module. It is an attempt to standardize and simplify logging in Parsons. All Parsons submodules will have logging implemented.

+
+

Note

+

By default, logging is not set and therefore no log messages are output. See Quickstart below for options to turn it on.

+
+
+

Quickstart

+

To view the standard log outputs of any Parsons module, pass in log=True.

+
from parsons import VAN
+
+ van = VAN(db='MyVoters', log=True) # Use the default log configuration
+
+ van.events()
+
+ >>> Getting events...
+ >>> Found 10 events.
+
+
+

Additionally, the Logger class is available for use in any arbitrary script. See Default Configuration for more details.

+
from parsons.logging.plogger import Logger
+
+logger = Logger(__name__)
+
+logger.info('This is an info log')
+
+# By default debug is not printed to sys.stdout.
+logger.debug('This is a debug log')
+
+>>> This is an info log
+
+
+
+
+

Configuring the Logger

+

Currently, only three handlers are natively supported, with the hope of adding more:

- StreamHandler
- FileHandler
- SlackHandler

+

Most configuration options that can be set on the underlying logging module should be supported by the Logger class.

+
import os
+import sys
+
+logger = Logger(__name__, handlers=[])
+
+logger.add_file_handler('name_of_log_file.log', level='DEBUG')
+
+logger.add_stream_handler(sys.stdout, level='INFO')
+
+logger.add_slack_handler(
+  os.environ['SLACK_URLS_PASSWORD'],
+  'db_test_chan',
+  level='ERROR')
+
+
+

To set the format, you can either pass in a Formatter object or a string format.

+
import logging
+
+logger.add_file_handler(
+  'name_of_log_file.log',
+  level='DEBUG',
+  sformat='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
+
+strm_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
+logger.add_stream_handler(sys.stdout, level='INFO', formatter=strm_format)
+
+
+
+
+

Default Configuration

+

The default logging configuration is:

+
  • StreamHandler
      • level: 'INFO'
      • format: '%(message)s'
      • stream: sys.stdout
  • FileHandler
      • level: 'DEBUG'
      • format: '%(asctime)s:%(name)s:%(levelname)s:%(message)s'
      • file: __name__.log
+
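For reference, here is a rough standard-library sketch of those defaults (an illustration only, not the Logger implementation itself):

import logging
import sys

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Stream handler: INFO and above to stdout, message only
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(stream_handler)

# File handler: DEBUG and above to <name>.log, with timestamps
file_handler = logging.FileHandler(f'{__name__}.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(
    logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s'))
logger.addHandler(file_handler)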
+

Logger

+
+
+class parsons.logging.plogger.Logger(name, handlers=None, level='INFO')[source]Âś
+
+
+add_handler(handler, level='INFO', sformat=None, formatter=None, **kwargs)[source]Âś
+

Add a handler to the logger.

+
+
Args:
+
+
handler: Handler
+
A Handler object to attach to the logger.
+
level: str
+
Optional; One of DEBUG, INFO, WARNING, ERROR, +or CRITICAL. Defaults to INFO.
+
sformat: str
+
Optional; A string format that can be passed into logging.Formatter.
+
formatter: Formatter
+
Optional; A Formatter object for formatting log events.
+
+
+
+
+ +
+
+add_stream_handler(stream, **kwargs)[source]Âś
+

Add a stream handler to the logger.

+
+
Args:
+
+
stream: Stream
+
A Stream object to attach to the logger.
+
+
+
+
+ +
+
+add_file_handler(filename, **kwargs)[source]Âś
+

Add a file handler to the logger.

+
+
Args:
+
+
filename: str
+
The name of the file where log messages should be saved.
+
+
+
+
+ +
+
+add_slack_handler(token, channel, **kwargs)[source]Âś
+

Add a slack handler to the logger.

+
+
Args:
+
+
token: str
+
The API token for a slack app that has chat.post scope +permissions.
+
channel: str
+
The name of the channel where message will be sent.
+
+
+
+
+ +
+
+add_default_handlers()[source]Âś
+

Add a set of predefined handlers to the logger.

+

Adds a stream handler that sends to sys.stdout and a file handler that saves to <name>.log.

+
+ +
+
+debug(msg, *args, **kwargs)[source]Âś
+

Log a debug message.

+
+
Args:
+
+
msg: str
+
The message to log.
+
+
+
+
+ +
+
+info(msg, *args, **kwargs)[source]Âś
+

Log an info message.

+
+
Args:
+
+
msg: str
+
The message to log.
+
+
+
+
+ +
+
+warning(msg, *args, **kwargs)[source]Âś
+

Log a warning message.

+
+
Args:
+
+
msg: str
+
The message to log.
+
+
+
+
+ +
+
+error(msg, *args, **kwargs)[source]Âś
+

Log an error message.

+
+
Args:
+
+
msg: str
+
The message to log.
+
+
+
+
+ +
+
+critical(msg, *args, **kwargs)[source]Âś
+

Log a critical message.

+
+
Args:
+
+
msg: str
+
The message to log.
+
+
+
+
+ +
+
+exception(msg, *args, **kwargs)[source]Âś
+

Log an exception message.

+
+
Args:
+
+
msg: str
+
The message to log.
+
+
+
+
+ +
+ +
+
\ No newline at end of file
diff --git a/docs/html/mobile_commons.html b/docs/html/mobile_commons.html
new file mode 100644
index 0000000000..205c334947
--- /dev/null
+++ b/docs/html/mobile_commons.html
@@ -0,0 +1,569 @@
+Mobile Commons — Parsons 0.1 documentation

Mobile Commons

+

The MobileCommons class leverages the API of Upland Mobile (née Mobile Commons).

+
+

Campaigns

+
+
+class parsons.mobile_commons.mobile_commons.Campaigns(mc_connection)[source]Âś
+

Class for campaigns endpoints.

+
+
+campaigns(include_opt_in_paths=0, sort='asc', status=None, campaign_id=None)[source]Âś
+

Return a list of campaigns.

+
+
Args:
+
+
include_opt_in_paths: int
+
Set to 1 to include all opt-in path details. Default is 0.
+
sort: str
+
Set to asc or desc to sort by campaign ID ascending or +descending. Default is ascending.
+
status: str
+
Set to active or ended to filter results. Default is empty and +returns all campaigns.
+
campaign_id: str
+
Provide a specific campaign ID to view a single result; an invalid campaign ID will return all campaigns.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+campaign(campaign_id, include_opt_in_paths=0)[source]Âś
+

Return a single campaign.

+
+
Args:
+
+
campaign_id: str
+
Provide a specific campaign ID to view a single result; an invalid campaign ID will return all campaigns.
+
include_opt_in_paths: int
+
Set to 1 to include all opt-in path details. Default is 0.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
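A hypothetical usage sketch (assumes mc is an established Mobile Commons connection object):

from parsons.mobile_commons.mobile_commons import Campaigns

campaigns = Campaigns(mc)                      # mc: assumed connection object
active = campaigns.campaigns(status='active')  # only active campaigns
active.to_csv('active_campaigns.csv')          # Parsons Table output option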
+
+

Groups

+
+
+class parsons.mobile_commons.mobile_commons.Groups(mc_connection)[source]Âś
+

Class for groups endpoints.

+
+
+groups()[source]Âś
+

Return a list of groups.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+group_members(group_id, limit=None, page=None, from_date=None, to_date=None)[source]Âś
+

Return a list of members in a group.

+
+
Args:
+
+
group_id: int
+
Required; The primary key of the group.
+
limit: int
+
Optional; Limits the number of returned profiles. Maximum of +1000.
+
page: int
+
Optional; Specifies which page, of the total number of pages of +results, to return.
+
from_date: str
+
Optional; Limits the returned profiles to ones updated after or +on this date time. ISO-8601 format.
+
to_date: str
+
Optional; Limits the returned profiles to ones updated before +or on this date time. ISO-8601 format.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+group_create(name)[source]Âś
+

Create a group.

+
+
Args:
+
+
name: str
+
Required; The name for the new group.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+group_add_members(group_id, phone_numbers)[source]Âś
+

Add a list of members to a group.

+
+
Args:
+
+
group_id: int
+
Required; The primary key of the group.
+
phone_numbers: list
+
Required; A list of phone numbers to add to the group. If the phone numbers don’t exist, they will be created as new profiles.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+group_remove_members(group_id, phone_number)[source]Âś
+

Remove a list of members from a group.

+
+
Args:
+
+
group_id: int
+
Required; The primary key of the group.
+
phone_number: list
+
Required; A list of phone numbers to remove from the group. +If the phone number is not a member of the group, it will +still return the group.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
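A hypothetical sketch of the group methods (assumes mc is an established Mobile Commons connection object; the group name and id are illustrative):

from parsons.mobile_commons.mobile_commons import Groups

groups = Groups(mc)                               # mc: assumed connection object
groups.group_create('GOTV Texters')               # hypothetical group name
groups.group_add_members(group_id=123, phone_numbers=['12025551234'])  # hypothetical ids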
+
+

Profiles

+
+
+class parsons.mobile_commons.mobile_commons.Profiles(mc_connection)[source]Âś
+

Class for profiles endpoints.

+
+
+profiles(phone_number=None, from_date=None, to_date=None, limit=None, page=None, include_custom_columns=None, include_subscriptions=None, include_clicks=None, include_members=None)[source]Âś
+

Return a list of profiles.

+
+
Args:
+
+
phone_number: list
+
Optional; Limits the returned profiles matching the provided +phone numbers. Phone numbers should be specified with country +code.
+
from_date: str
+
Optional; Limits the returned profiles to ones updated after +or on this date time. ISO-8601 format
+
to_date: str
+
Optional; Limits the returned profiles to ones updated before or on this date time. ISO-8601 format.
+
limit: int
+
Optional; Limits the number of returned profiles. +Maximum of 1000
+
page: int
+
Optional; Specifies which page, of the total number of pages +of results, to return
+
include_custom_columns: boolean
+
Optional; default ‘true’. Pass ‘false’ to exclude custom columns associated with profiles.
+
include_subscriptions: boolean
+
Optional; default ‘true’. Pass ‘false’ to exclude subscriptions for each profile.
+
include_clicks: boolean
+
Optional; default ‘true’. Pass ‘false’ to exclude clicks.
+
include_members: boolean
+
Optional; default ‘true’. Pass ‘false’ to exclude profile member records maintained for integrations.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+profile_get(phone_number, company=None, include_messages=False, include_donations=False)[source]Âś
+

Return a single profile record.

+
+
Args:
+
+
phone_number: str
+
Required; The phone number for the profile to return.
+
company: str
+
Optional; A company, if different than the one specified for the connection. Defaults to the connection’s company.
+
include_messages: boolean
+
Optional; Set to true to include associated text messages. +Default is false.
+
include_donations: boolean
+
Optional; Set to true to include associated mobile giving +donations, if any. Default is false.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+profile_update(phone_number, email=None, postal_code=None, first_name=None, last_name=None, street1=None, street2=None, city=None, state=None, country=None, custom_fields=None, opt_in_path_id=None)[source]Âś
+

Create or update a profile.

+
+
Args:
+
+
phone_number: str
+
Required; The phone number for the profile to update.
+
email: str
+
Optional; New email for the profile.
+
postal_code: str
+
Optional; New postal code for the profile.
+
first_name: str
+
Optional; New firstname for the profile.
+
last_name: str
+
Optional; New lastname for the profile.
+
street1: str
+
Optional; New street1 for the profile.
+
street2: str
+
Optional; New street2 for the profile.
+
city: str
+
Optional; New city for the profile.
+
state: str
+
Optional; New state for the profile.
+
country: str
+
Optional; New country for the profile.
+
custom_fields: dict
+
Optional; A dict of custom fields and their new values for the +profile.
+
opt_in_path_id: str
+
Optional; New opt_in_path_id for the profile.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+profile_opt_out(phone_number, campaign_id=None, subscription_id=None)[source]Âś
+

Opt out a profile from a campaign, subscription or all.

+
+
Args:
+
+
phone_number: str
+
Required; The phone number for the profile to opt out.
+
campaign_id: int
+
Optional; Opt-out this campaign only. Default is all campaigns.
+
subscription_id: int
+
Optional; Opt-out this subscription only. Default is all +subscriptions.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
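A hypothetical sketch of the profile methods (assumes mc is an established Mobile Commons connection object; the phone number is illustrative):

from parsons.mobile_commons.mobile_commons import Profiles

profiles = Profiles(mc)                           # mc: assumed connection object
recent = profiles.profiles(from_date='2019-01-01', limit=500)
profiles.profile_opt_out('12025551234')           # opt out of all campaigns and subscriptions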
+
\ No newline at end of file
diff --git a/docs/html/mobilize_america.html b/docs/html/mobilize_america.html
new file mode 100644
index 0000000000..08e2e41e69
--- /dev/null
+++ b/docs/html/mobilize_america.html
@@ -0,0 +1,504 @@
+Mobilize America — Parsons 0.5 documentation

Mobilize America

+
+

Overview

+

Mobilize America is an activist signup tool used by progressive organizations. +This class provides several methods for fetching organizations, people, and events from their +API, which is currently in alpha development.

+
+

Note

+
+
Authentication
+
Some methods in the MobilizeAmerica class require an API Key furnished by Mobilize America (private methods), +while others do not (public methods). Each method in this class contains a note indicating whether it is public +or private. For more information, see the API documentation.
+
+
+
+
+

Quickstart

+

If you instantiate MobilizeAmerica without an API Key, you can only use public methods:

+
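A minimal sketch (method names as documented below):

from parsons import MobilizeAmerica

ma = MobilizeAmerica()  # no API key, so public methods only
organizations = ma.get_organizations()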

In order to use private methods, you must provide an API key either by setting the environmental +variable MOBILIZE_AMERICA_API_KEY or by passing an api_key argument as shown below:

+
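A minimal sketch (the api key and organization id are hypothetical):

from parsons import MobilizeAmerica

ma = MobilizeAmerica(api_key='MY_API_KEY')
people = ma.get_people(organization_id=1234)  # private method; id is hypothetical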
+
+

API

+
+
+class parsons.MobilizeAmerica(api_key=None)[source]Âś
+

Instantiate MobilizeAmerica Class

+
+
api_key: str
+
An api key issued by Mobilize America. This is required to access some private methods.
+
Returns:
+
MobilizeAmerica Class
+
+
+
+get_organizations(updated_since=None)[source]Âś
+

Return all active organizations on the platform.

+
+
Args:
+
+
updated_since: str
+
Filter to organizations updated since given date (ISO Date)
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_events(organization_id=None, updated_since=None, timeslot_start=None, timeslot_end=None, timeslots_table=False, max_timeslots=None)[source]Âś
+

Fetch all public events on the platform.

+
+
Args:
+
+
organization_id: list or int
+
Filter events by a single or multiple organization ids
+
updated_since: str
+
Filter to events updated since given date (ISO Date)
+
timeslot_start: str
+
Filter by a timeslot start of events using the >, >=, <, <= operators and an ISO date (e.g. <=2018-12-13 05:00:00PM)
+
timeslot_end: str
+
Filter by a timeslot end of events using the >, >=, <, <= operators and an ISO date (e.g. <=2018-12-13 05:00:00PM)
+
timeslots_table: boolean
+
Return timeslots as a separate long table. Useful for extracting +to databases.
+
max_timeslots: int
+

If not returning a timeslot table, this will unpack time slots. If you do not set this kwarg, it will add a column for each time slot. The argument limits the number of columns and discards any additional timeslots after that.

+

For example: If there are 20 timeslots associated with your event, +and you set the max time slots to 5, it will only return the first 5 +time slots as time_slot_0, time_slot_1 etc.

+

This is helpful in situations where you have a regular sync +running and want to ensure that the column headers remain static.

+
+
+
+
Returns
+
+
Parsons Table or dict or Parsons Tables
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_events_organization(organization_id=None, updated_since=None, timeslot_start=None, timeslot_end=None, timeslots_table=False, max_timeslots=None)[source]Âś
+

Fetch all public events for an organization. This includes both events owned +by the organization (as indicated by the organization field on the event object) +and events of other organizations promoted by this specified organization.

+
+

Note

+

API Key Required

+
+
+
Args:
+
+
organization_id: list or int
+
Filter events by a single or multiple organization ids
+
updated_since: str
+
Filter to events updated since given date (ISO Date)
+
timeslot_start: str
+
Filter by a timeslot start of events using the >, >=, <, <= operators and an ISO date (e.g. <=2018-12-13 05:00:00PM)
+
timeslot_end: str
+
Filter by a timeslot end of events using the >, >=, <, <= operators and an ISO date (e.g. <=2018-12-13 05:00:00PM)
+
timeslots_table: boolean
+
Return timeslots as a separate long table. Useful for extracting +to databases.
+
zipcode: str
+
Filter by a Events’ Locations’ postal code. If present, returns Events +sorted by distance from zipcode. If present, virtual events will not be returned.
+
max_dist: str
+
Filter Events’ Locations’ distance from provided zipcode.
+
visibility: str
+
Either PUBLIC or PRIVATE. Private events only return if user is authenticated; +if visibility=PRIVATE and user doesn’t have permission, no events returned.
+
exclude_full: bool
+
If exclude_full=true, filter out full Timeslots (and Events if all of an Event’s +Timeslots are full)
+
is_virtual: bool
+
is_virtual=false will return only in-person events, while is_virtual=true will +return only virtual events. If excluded, return virtual and in-person events. Note +that providing a zipcode also implies is_virtual=false.
+
event_types: enum
+
The type of the event, one of: CANVASS, PHONE_BANK, TEXT_BANK, MEETING, +COMMUNITY, FUNDRAISER, MEET_GREET, HOUSE_PARTY, VOTER_REG, TRAINING, +FRIEND_TO_FRIEND_OUTREACH, DEBATE_WATCH_PARTY, ADVOCACY_CALL, OTHER. +This list may expand in the future.
+
max_timeslots: int
+

If not returning a timeslot table, this will unpack time slots. If you do not set this arg, it will add a column for each time slot. The argument limits the number of columns and discards any additional timeslots after that.

+

For example: If there are 20 timeslots associated with your event, +and you set the max time slots to 5, it will only return the first 5 +time slots as time_slot_0, time_slot_1 etc.

+

This is helpful in situations where you have a regular sync +running and want to ensure that the column headers remain static.

+
+
+
+
Returns
+
+
Parsons Table or dict or Parsons Tables
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_events_deleted(organization_id=None, updated_since=None)[source]Âś
+

Fetch deleted public events on the platform.

+
+
Args:
+
+
organization_id: list or int
+
Filter events by a single or multiple organization ids
+
updated_since: str
+
Filter to events updated since given date (ISO Date)
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_people(organization_id=None, updated_since=None)[source]Âś
+

Fetch all people (volunteers) who are affiliated with the organization.

+
+

Note

+

API Key Required

+
+
+
Args:
+
+
organization_id: list of int
+
Filter events by a single or multiple organization ids
+
updated_since: str
+
Filter to events updated since given date (ISO Date)
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_attendances(organization_id=None, updated_since=None)[source]Âś
+

Fetch all attendances which were either promoted by the organization or +were for events owned by the organization.

+
+

Note

+

API Key Required

+
+
+
Args:
+
+
organization_id: list of int
+
Filter events by a single or multiple organization ids
+
updated_since: str
+
Filter to events updated since given date (ISO Date)
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
\ No newline at end of file
diff --git a/docs/html/ngpvan.html b/docs/html/ngpvan.html
new file mode 100644
index 0000000000..f8084527c9
--- /dev/null
+++ b/docs/html/ngpvan.html
@@ -0,0 +1,2696 @@
+NGPVAN — Parsons 0.5 documentation
+

NGPVAN

+
+

Overview

+

The VAN module leverages the VAN API and generally follows the naming convention of their API endpoints. It is recommended that you reference their API documentation for additional details and information.

+
+

Note

+
+
API Keys
+
  • API Keys are specific to each committee and state.
  • There is a Parsons type API Key that can be requested via the Integrations menu on the main page. If you have an issue gaining access to this key, or an admin has questions, please email <parsons@movementcooperative.org>.
+
+
+
+
+

Warning

+
+
VANIDs
+
VANIDs are unique to each state and instance of the VAN. VANIDs used for the AV VAN will not match +those of the SmartVAN or VoteBuilder.
+
Maintenance & Suppoort
+
VAN/EveryAction is not responsible for support of Parsons. Their support team cannot answer questions +about Parsons. Please direct any questions
+
+
+
+
+
+
+

QuickStart

+

To call the VAN class you can either store the api key as an environmental variable VAN_API_KEY or pass it in as an argument.

+
from parsons import VAN
+
+ van = VAN(db='MyVoters') # Specify the DB type and pass api key via environmental variable.
+
+ van = VAN(api_key='asdfa-sdfadsf-adsfasdf',db='MyVoters') # Pass api key directly
+
+
+

You can then call various endpoints:

+
from parsons import VAN
+
+ van = VAN(db='MyVoters')
+
+ # List events with a date filter
+ events = van.get_events(starting_before='2018-02-01')
+
+ # List all folders shared with API Key User
+ folders = van.get_folders()
+
+ # Write the results to a Redshift database
+ saved_lists = van.get_saved_lists().to_redshift('van.my_saved_lists')
+
+
+

This is just a small sampling of all of the VAN endpoints that you can leverage. We recommend reviewing the documentation for all functions.

+
+
+

Common Workflows

+
+

Bulk Import

+

For some methods, VAN allows you to bulk import multiple records to create or modify them.

+

The bulk upload endpoint requires access to a file on the public internet, as it runs the upload asynchronously. Therefore, in order to bulk import, you must pass in cloud storage credentials so that the file can be posted. Currently, only S3 is supported.

+

Bulk Apply Activist Codes

+
from parsons import VAN, Table
+
+van = VAN(db='EveryAction')
+
+# Load a table containing the VANID, activistcodeid and other options.
+tbl = Table.from_csv('new_volunteers.csv')
+
+# Table will be sent to S3 bucket and a POST request will be made to VAN creating
+# the bulk import job with all of the valid meta information. The method will
+# return the job id.
+job_id = van.bulk_apply_activist_codes(tbl, url_type="S3", bucket='my_bucket')
+
+# The bulk import job is run asynchronously, so you may poll the status of a job.
+job_status = van.get_bulk_import_job(job_id)
+
+
+
+
+

Scores: Loading and Updating

+

Loading a score is a multi-step process. Once a score is set to approved, loading takes place overnight.

+

Standard Auto Approve Load

+
from parsons import VAN, Table
+
+van = VAN(db='MyVoters') # API key stored as an environmental variable
+
+# If you don't know the id, you can run van.get_scores() to list the
+# slots that are available and their associated score ids.
+score_id = 9999
+
+# Load the Parsons table with the scores. The first column of the table
+# must be the person id (e.g. VANID). You could create this from Redshift or
+# another source.
+tbl = Table.from_csv('winning_scores.csv')
+
+# Specify the score id slot and the column name for each score.
+config = [{'score_id': score_id, 'score_column': 'winning_model'}]
+
+# If you have multiple models in the same file, you can load them all at the same time.
+# In fact, VAN recommends that you do so to reduce their server loads.
+config = [{'score_id': 5555, 'score_column': 'score1'}, {'score_id': 5556, 'score_column': 'score2'}]
+
+# The score file must be posted to the internet. This configuration uses S3 to do so. In this
+# example, your S3 keys are stored as environmental variables. If not, you can pass them
+# as arguments.
+# example, your S3 keys are stored as environmental variables. If not, you can pass them
+# as arguments.
+job_id = van.upload_scores(tbl, config, url_type='S3', email='info@tmc.org', bucket='tmc-fake')
+
+
+

Standard Load Requiring Approval

+
from parsons import VAN
+
+van = VAN(db='MyVoters') # API key stored as an environmental variable
+config = [{'score_id': 3421, 'score_column': 'winning_model'}]
+
+# Note that auto_approve is set to False. This means that you need to manually approve
+# the job once it is loaded.
+job_id = van.upload_scores(tbl, config, url_type='S3', email='info@tmc.org',
+                           bucket='tmc-fake', auto_approve=False)
+
+# Approve the job
+van.update_score_status(job_id,'approved')
+
+
+
+
+

People: Add Survey Response

+

The following workflow can be used to apply survey questions, activist codes +and canvass responses.

+
from parsons import VAN
+
+# Instantiate Class
+van = VAN(db="MyVoters")
+
+van_id = 13242
+sq = 311838 # Valid survey question id
+sr = 1288926 # Valid survey response id
+ct = 36 # Valid contact type id
+it = 4 # Valid input type id
+
+# Create a valid survey question response
+van.apply_survey_response(van_id, sq, sr, contact_type_id=ct, input_type_id=it)
+
+
+
+
+

Event: Creating and Modifying

+

Events are made up of sub-objects that need to exist before you can create an event:

+
  • Event Object - The event itself
  • Event Type - The type of event, such as a Canvass or Phone Bank. These are created in the VAN UI and can be reused for multiple events.
  • Locations - An event can have multiple locations. While not required to initially create an event, these are required to add signups to an event.
  • Roles - The various roles that a person can have at an event, such as Lead or Canvasser. These are set as part of the event type.
  • Shifts - Each event can have multiple shifts to which a person can be assigned. These are specified in the event creation.
+
from parsons import VAN
+
+# Instantiate class
+van = VAN(db="EveryAction")
+
+# Create A Location
+loc_id = van.location(name="Big 'Ol Canvass", address='100 W Washington', city='Chicago', state='IL')
+
+# Create Event
+name = 'GOTV Canvass' # Name of event
+short_name = 'GOTVCan' # Short name of event, 12 chars or less
+start_time = '2018-11-01T15:00:00' # ISO formatted date
+end_time = '2018-11-01T18:00:00' # ISO formatted date after start time
+event_type_id = 296199 # A valid event type id
+roles = [259236] # A list of valid role ids
+location_ids = [loc_id] # An optional list of locations ids for the event
+description = 'CPD Super Volunteers Canvass' # Optional description of 200 chars or less
+shifts = [{'name': 'Shift 1',
+           'start_time': '2018-11-01T15:00:00',
+           'end_time': '2018-11-11T17:00:00'}] # Shifts must fall within event start/end time.
+
+new_event = van.event_create(name, short_name, start_time, end_time, event_type_id, roles,
+                             location_ids=location_ids, shifts=shifts, description=description)
+
+
+
+
+

Signup: Adding and Modifying

+
from parsons import VAN
+
+# Instantiate class
+van = VAN(db="EveryAction")
+
+# Create a new signup
+
+vanid = 100349920
+event_id = 750001004
+shift_id = 19076
+role_id = 263920
+location_id = 3
+status_id = 11
+
+# Create the signup. Will return a signup id
+signup_id = van.signup_create(vanid, event_id, shift_id, role_id, status_id, location_id)
+
+# Modify a status of the signup
+new_status_id = 6
+van.signup_update(signup_id, status_id=new_status_id)
+
+
+
+
+
+

API

+
+

People

+
+
+class parsons.ngpvan.van.People(van_connection)[source]Âś
+
+
+find_person(first_name=None, last_name=None, date_of_birth=None, email=None, phone=None, phone_type=None, street_number=None, street_name=None, zip=None)[source]Âś
+

Find a person record.

+
+

Note

+

Person find must include one of the following minimum combinations to conduct a search.

+
  • first_name, last_name, email
  • first_name, last_name, phone
  • first_name, last_name, zip5, date_of_birth
  • first_name, last_name, street_number, street_name, zip5
  • email_address
+
+
+
Args:
+
+
first_name: str
+
The person’s first name
+
last_name: str
+
The person’s last name
+
dob: str
+
ISO 8601 formatted date of birth (e.g. 1981-02-01)
+
email: str
+
The person’s email address
+
phone: str
+
Phone number of any type (Work, Cell, Home)
+
street_number: str
+
Street Number
+
street_name: str
+
Street Name
+
zip: str
+
5 digit zip code
+
+
+
Returns:
+
A person dict object
+
+
+ +
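For example, a sketch using one of the valid combinations above (assumes the van instance from the QuickStart; values are hypothetical):

person = van.find_person(first_name='Lucy', last_name='Parsons', email='lucy@example.org')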
+
+find_person_json(match_json)[source]Âś
+

Find a person record based on json data.

+
+

Note

+

Person find must include one of the following minimum combinations to conduct a search.

+
  • first_name, last_name, email
  • first_name, last_name, phone
  • first_name, last_name, zip5, date_of_birth
  • first_name, last_name, street_number, street_name, zip5
  • email_address
+
+
+

Note

+

A full list of possible values for the json, and its structure can be found +here.

+
+
+
Args:
+
+
match_json: dict
+
A dictionary of values to match against.
+
+

fields: The fields to return. Leave as default for all available fields

+
+
Returns:
+
A person dict object
+
+
+ +
+
+update_person(id=None, id_type='vanid', first_name=None, last_name=None, date_of_birth=None, email=None, phone=None, phone_type=None, street_number=None, street_name=None, zip=None)[source]Âś
+

Update a person record based on a provided ID. All other arguments provided will be +updated on the record.

+
+

Warning

+

This method can only be run on MyMembers, EveryAction, MyCampaign databases.

+
+
+
Args:
+
+
id: str
+
A valid id
+
id_type: str
+
A known person identifier type available on this VAN instance. +Defaults to vanid.
+
first_name: str
+
The person’s first name
+
last_name: str
+
The person’s last name
+
dob: str
+
ISO 8601 formatted date of birth (e.g. 1981-02-01)
+
email: str
+
The person’s email address
+
phone: str
+
Phone number of any type (Work, Cell, Home)
+
phone_type: str
+
One of ‘H’ for home phone, ‘W’ for work phone, ‘C’ for cell, ‘M’ for +main phone or ‘F’ for fax line. Defaults to home phone.
+
street_number: str
+
Street Number
+
street_name: str
+
Street Name
+
zip: str
+
5 digit zip code
+
+
+
Returns:
+
A person dict
+
+
+ +
+
+update_person_json(id, id_type='vanid', match_json=None)[source]Âś
+

Update a person record based on a provided ID within the match_json dict.

+
+

Note

+

A full list of possible values for the json, and its structure can be found +here.

+
+
+
Args:
+
+
id: str
+
A valid id
+
id_type: str
+
A known person identifier type available on this VAN instance. +Defaults to vanid.
+
match_json: dict
+
A dictionary of values to match against and save.
+
+
+
Returns:
+
A person dict
+
+
+ +
+
+upsert_person(first_name=None, last_name=None, date_of_birth=None, email=None, phone=None, phone_type=None, street_number=None, street_name=None, zip=None)[source]Âś
+

Create or update a person record.

+
+

Note

+

Person find must include one of the following minimum combinations:
- first_name, last_name, email
- first_name, last_name, phone
- first_name, last_name, zip5, date_of_birth
- first_name, last_name, street_number, street_name, zip5
- email_address

+
+
+

Warning

+

This method can only be run on MyMembers, EveryAction, MyCampaign databases.

+
+
+
Args:
+
+
first_name: str
+
The person’s first name
+
last_name: str
+
The person’s last name
+
dob: str
+
ISO 8601 formatted date of birth (e.g. 1981-02-01)
+
email: str
+
The person’s email address
+
phone: str
+
Phone number of any type (Work, Cell, Home)
+
phone_type: str
+
One of ‘H’ for home phone, ‘W’ for work phone, ‘C’ for cell, ‘M’ for +main phone or ‘F’ for fax line. Defaults to home phone.
+
street_number: str
+
Street Number
+
street_name: str
+
Street Name
+
zip: str
+
5 digit zip code
+
+
+
Returns:
+
A person dict
+
+
+ +
+
+upsert_person_json(match_json)[source]Âś
+

Create or update a person record.

+
+

Note

+

Person find must include one of the following minimum combinations:
- first_name, last_name, email
- first_name, last_name, phone
- first_name, last_name, zip5, date_of_birth
- first_name, last_name, street_number, street_name, zip5
- email_address

+
+
+

Note

+

A full list of possible values for the json, and its structure can be found +here. vanId can +be passed to ensure the correct record is updated.

+
+
+

Warning

+

This method can only be run on MyMembers, EveryAction, MyCampaign databases.

+
+
+
Args:
+
+
match_json: dict
+
A dictionary of values to match against and save.
+
+
+
Returns:
+
A person dict
+
+
+ +
+
+get_person(id, id_type='vanid', expand_fields=['contribution_history', 'addresses', 'phones', 'emails', 'codes', 'custom_fields', 'external_ids', 'preferences', 'recorded_addresses', 'reported_demographics', 'suppressions', 'cases', 'custom_properties', 'districts', 'election_records', 'membership_statuses', 'notes', 'organization_roles', 'disclosure_field_values'])[source]Âś
+

Returns a single person record using their VANID or external id.

+
+
Args:
+
+
id: str
+
A valid id
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid. Defaults to vanid.
+
expand_fields: list
+
A list of fields for which to include data. If a field is omitted, +None will be returned for that field. Can be contribution_history, +addresses, phones, emails, codes, custom_fields, +external_ids, preferences, recorded_addresses, +reported_demographics, suppressions, cases, custom_properties, +districts, election_records, membership_statuses, notes, +organization_roles, scores, disclosure_field_values.
+
+
+
Returns:
+
A person dict
+
+
+ +
+
+apply_canvass_result(id, result_code_id, id_type='vanid', contact_type_id=None, input_type_id=None, date_canvassed=None)[source]Âś
+

Apply a canvass result to a person. Use this end point for attempts that do not +result in a survey response or an activist code (e.g. Not Home).

+
+
Args:
+
+
id: str
+
A valid person id
+
result_code_id : int
+
Specifies the result code of the attempt. Valid ids can be found +by using the get_canvass_responses_result_codes()
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid
+
contact_type_id : int
+
Optional; A valid contact type id
+
input_type_id : int
+
Optional; Defaults to 11 (API Input)
+
date_canvassed : str
+
Optional; ISO 8601 formatted date. Defaults to today's date
+
+
+
Returns:
+
None
+
+
+ +
+
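A sketch of recording a “Not Home” attempt (ids are hypothetical; list valid result codes with get_canvass_responses_result_codes()):

van.apply_canvass_result(13242, result_code_id=18)  # hypothetical person and result code ids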
+toggle_volunteer_action(id, volunteer_activity_id, action, id_type='vanid', result_code_id=None, contact_type_id=None, input_type_id=None, date_canvassed=None)[source]Âś
+

Apply or remove a volunteer action to or from a person.

+
+
Args:
+
+
id: str
+
A valid person id
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid
+
volunteer_activity_id: int
+
A valid volunteer activity id
+
action: str
+
Either ‘apply’ or ‘remove’
+
result_code_id : int
+
Optional; Specifies the result code of the response. If +not included,responses must be specified. Conversely, if +responses are specified, result_code_id must be null. Valid ids +can be found by using the get_canvass_responses_result_codes()
+
contact_type_id: int
+
Optional; A valid contact type id
+
input_type_id: int
+
Optional; Defaults to 11 (API Input)
+
date_canvassed: str
+
Optional; ISO 8601 formatted date. Defaults to today's date
+
+
+
+

** NOT IMPLEMENTED **

+
+ +
+
+apply_response(id, response, id_type='vanid', contact_type_id=None, input_type_id=None, date_canvassed=None, result_code_id=None)[source]Âś
+

Apply responses such as survey questions, activist codes, and volunteer actions +to a person record. This method allows you apply multiple responses (e.g. two survey +questions) at the same time. It is a low level method that requires that you +conform to the VAN API response object format.

+
+
Args:
+
+
id: str
+
A valid person id
+
response: dict
+
A list of dicts with each dict containing a valid action.
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid
+
result_code_id : int
+
Optional; Specifies the result code of the response. If +not included,responses must be specified. Conversely, if +responses are specified, result_code_id must be null. Valid ids +can be found by using the get_canvass_responses_result_codes()
+
contact_type_id : int
+
Optional; A valid contact type id
+
input_type_id : int
+
Optional; Defaults to 11 (API Input)
+
date_canvassed : str
+
Optional; ISO 8601 formatted date. Defaults to today's date
+
+

responses : list or dict

+
+
Returns:
+
True if successful
+
+
response = [{"activistCodeId": 18917,
+             "action": "Apply",
+             "type": "ActivistCode"},
+            {"surveyQuestionId": 109149,
+             "surveyResponseId": 465468,
+             "action": "SurveyResponse"}
+            ]
+van.apply_response(5222, response)
+
+
+
+ +
+
+create_relationship(vanid_1, vanid_2, relationship_id)[source]Âś
+

Create a relationship between two individuals

+
+
Args:
+
+
vanid_1 : int
+
The vanid of the primary individual; aka the node
+
vanid_2 : int
+
The vanid of the secondary individual; the spoke
+
relationship_id : int
+
The relationship id indicating the type of relationship
+
+
+
Returns:
+
None
+
+
+ +
+
+apply_person_code(id, code_id, id_type='vanid')[source]Âś
+

Apply a code to a person.

+
+
Args:
+
+
id: str
+
A valid person id.
+
code_id: int
+
A valid code id.
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid
+
+
+
Returns:
+
None
+
+
+ +
+ +
+
+

Activist Codes

+
+
+class parsons.ngpvan.van.ActivistCodes(van_connection)[source]Âś
+
+
+get_activist_codes()[source]Âś
+

Get activist codes.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_activist_code(activist_code_id)[source]Âś
+

Get an activist code.

+
+
Args:
+
+
activist_code_id : int
+
The activist code id.
+
+
+
Returns:
+
+
dict
+
The activist code
+
+
+
+
+ +
+
+apply_activist_code(id, activist_code_id, id_type='vanid')[source]Âś
+

Apply an activist code to or from a person.

+
+
Args:
+
+
id: str
+
A valid person id
+
activist_code_id: int
+
A valid activist code id
+
action: str
+
Either ‘apply’ or ‘remove’
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid
+
+
+
Returns:
+
None
+
+
+ +
+
+remove_activist_code(id, activist_code_id, id_type='vanid')[source]Âś
+

Remove an activist code to or from a person.

+
+
Args:
+
+
id: str
+
A valid person id
+
activist_code_id: int
+
A valid activist code id
+
action: str
+
Either ‘apply’ or ‘remove’
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid
+
+
+
Returns:
+
None
+
+
+ +
+ +
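A sketch of applying and then removing a code for a single person (ids are hypothetical):

van.apply_activist_code(13242, activist_code_id=18917)   # hypothetical ids
van.remove_activist_code(13242, activist_code_id=18917)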
+
+

Bulk Import

+
+
+class parsons.ngpvan.van.BulkImport[source]Âś
+
+
+get_bulk_import_resources()[source]Âś
+

Get bulk import resources that are available to the user. These define the types of bulk imports that you can run. These might include Contacts, ActivistCodes, ContactsActivistCodes and others.

+
+
Returns:
+
+
list
+
A list of resources.
+
+
+
+
+ +
+
+get_bulk_import_job(job_id)[source]Âś
+

Get a bulk import job status.

+
+
Args:
+
+
job_id : int
+
The bulk import job id.
+
+
+
Returns:
+
+
dict
+
The bulk import job
+
+
+
+
+ +
+
+get_bulk_import_mapping_types()[source]Âś
+

Get bulk import mapping types.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_bulk_import_mapping_type(type_name)[source]Âś
+

Get a single bulk import mapping type.

+
+
Args:
+
type_name: str
+
Returns:
+
+
dict
+
A mapping type json
+
+
+
+
+ +
+
+bulk_apply_activist_codes(tbl, url_type, **url_kwargs)[source]Âś
+

Bulk apply activist codes.

+

The table may include the following columns. The first column +must be vanid.

Column Name       Required   Description
vanid             Yes        A valid VANID primary key
activistcodeid    Yes        A valid activist code id
datecanvassed     No         An ISO formatted date
contacttypeid     No         The method of contact.
+
+
Args:
+
+
table: Parsons table
+
A Parsons table.
+
url_type: str
+
The cloud file storage to use to post the file. Currently only S3.
+
**url_kwargs: kwargs
+
+
Arguments to configure your cloud storage url type.
+
  • S3 requires a bucket argument and, if not stored as env variables, aws_access_key and aws_secret_access_key.
+
+
+
+
+
+
Returns:
+
+
int
+
The bulk import job id
+
+
+
+
+ +
+ +
+
+

Canvass Responses

+
+
+class parsons.ngpvan.van.CanvassResponses(van_connection)[source]Âś
+
+
+get_canvass_responses_contact_types()[source]Âś
+

Get canvass response contact types.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_canvass_responses_input_types()[source]Âś
+

Get canvass response input types.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_canvass_responses_result_codes()[source]Âś
+

Get canvass response result codes.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

Changed Entities

+
+
+class parsons.ngpvan.van.ChangedEntities[source]Âś
+
+
+get_changed_entity_resources()[source]Âś
+

Get changed entity resources available to the API user.

+
+
Returns:
+
list
+
+
+ +
+
+get_changed_entity_resource_fields(resource_type)[source]Âś
+

Get export fields available for each changed entity resource.

+
+
Args:
+
resource_type: str
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
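A sketch of the two methods together ('Contacts' is an assumed resource type; list the real options with get_changed_entity_resources()):

resources = van.get_changed_entity_resources()
fields = van.get_changed_entity_resource_fields('Contacts')  # assumed resource type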
+
+

Codes

+
+
+class parsons.ngpvan.van.Codes(van_connection)[source]Âś
+
+
+get_codes(name=None, supported_entities=None, parent_code_id=None, code_type=None)[source]Âś
+

Get codes.

+
+
Args:
+
+
name : str
+
Filter by name of code.
+
supported_entities: str
+
Filter by supported entities.
+
parent_code_id: str
+
Filter by parent code id.
+
code_type: str
+
Filter by code type.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_code(code_id)[source]Âś
+

Get a code.

+
+
Args:
+
+
code_id : int
+
The code id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_code_types()[source]Âś
+

Get code types.

+
+
Returns:
+
+
list
+
A list of code types.
+
+
+
+
+ +
+
+create_code(name=None, parent_code_id=None, description=None, code_type='SourceCode', supported_entities=None)[source]Âś
+

Create a code.

+
+
Args:
+
+
name: str
+
The name of the code.
+
parent_code_id: int
+
A unique identifier for this code’s parent.
+
description: str
+
A description for this code, no longer than 200 characters.
+
code_type: str
+
The code type. Tag and SourceCode are valid values.
+
supported_entities: list
+

A list of dicts that enumerate the searchability and applicability rules of the code. You can find supported entities with get_code_supported_entities()

+
[
+    {
+     'name': 'Event',
+     'is_searchable': True,
+     'is_applicable': True
+    },
+    {
+     'name': 'Locations',
+     'is_searchable': True,
+     'is_applicable': True
+    }
+]
+
+
+
+
+
+
+
+ +
+
+update_code(code_id, name=None, parent_code_id=None, description=None, code_type='SourceCode', supported_entities=None)[source]Âś
+

Update a code.

+
+
Args:
+
+
code_id: int
+
The code id.
+
name: str
+
The name of the code.
+
parent_code_id: int
+
A unique identifier for this code’s parent.
+
description: str
+
A description for this code, no longer than 200 characters.
+
code_type: str
+
The code type. Tag and SourceCode are valid values.
+
supported_entities: list
+

A list of dicts that enumerate the searchability and applicability rules of the code. You can find supported entities with get_code_supported_entities()

+
[
+    {
+     'name': 'Event',
+     'is_searchable': True,
+     'is_applicable': True
+    },
+    {
+     'name': 'Locations',
+     'is_searchable': True,
+     'is_applicable': True
+    }
+]
+
+
+
+
+
+
+
+ +
+
+delete_code(code_id)[source]Âś
+

Delete a code.

+
+
Args:
+
+
code_id: int
+
The code id.
+
+
+
Returns:
+
None
+
+
+ +
+
+get_code_supported_entities()[source]Âś
+

Get code supported entities.

+
+
Returns:
+
+
list
+
A list of code supported entities.
+
+
+
+
+ +
+ +
+
+

Custom Fields

+
+
+class parsons.ngpvan.van.CustomFields(van_connection)[source]Âś
+
+
+get_custom_fields(field_type='contacts')[source]Âś
+

Get custom fields.

+
+
Args:
+
+
field_type : str
+
Filter by custom field group type. Must be one of contacts or +contributions.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_custom_fields_values(field_type='contacts')[source]Âś
+

Get custom field values as a long table.

+
+
Args:
+
+
field_type : str
+
Filter by custom field group type. Must be one of contacts or +contributions.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_custom_field(custom_field_id)[source]Âś
+

Get a custom field.

+
+
Args:
+
+
custom_field_id: int
+
A valid custom field id.
+
+
+
Returns:
+
A json.
+
+
+ +
+ +
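A short sketch listing the contact custom fields and their values (assumes the van instance from the QuickStart):

contact_fields = van.get_custom_fields(field_type='contacts')
field_values = van.get_custom_fields_values(field_type='contacts')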
+
+

Events

+
+
+class parsons.ngpvan.van.Events(van_connection)[source]Âś
+
+
+get_events(code_ids=None, event_type_ids=None, rep_event_id=None, starting_after=None, starting_before=None, district_field=None, expand_fields=['locations', 'codes', 'shifts', 'roles', 'notes', 'financialProgram', 'ticketCategories', 'onlineForms'])[source]Âś
+

Get events.

+
+
Args:
+
+
code_ids: str
+
Filter by code id.
+
event_type_ids: str
+
Filter by event_type_ids.
+
rep_event_id: str
+
Filters to recurring events that are recurrences of the passed event id.
+
starting_after: str
+
Events beginning after iso8601 formatted date.
+
starting_before: str
+
Events beginning before iso8601 formatted date.
+
district_field: str
+
Filter by district field.
+
expand_fields: list
+
A list of fields for which to include data. If a field is omitted, None will be returned for that field. Can be locations, codes, shifts, roles, notes, financialProgram, ticketCategories, onlineForms.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_event(event_id, expand_fields=['locations', 'codes', 'shifts', 'roles', 'notes', 'financialProgram', 'ticketCategories', 'voterRegistrationBatches'])[source]Âś
+

Get an event.

+
+
Args:
+
+
event_id: int
+
The event id.
+
expand_fields: list
+
A list of fields for which to include data. If a field is omitted, None will be returned for that field. Can be locations, codes, shifts, roles, notes, financialProgram, ticketCategories, voterRegistrationBatches.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+create_event(name, short_name, start_date, end_date, event_type_id, roles, shifts=None, description=None, editable=False, publicly_viewable=False, location_ids=None, code_ids=None, notes=None, district_field_value=None, voter_registration_batches=None)[source]Âś
+

Create an event

+
+
Args:
+
+
name: str
+
A name for this event, no longer than 500 characters.
+
short_name: str
+
A shorter name for this event, no longer than 12 characters.
+
start_date: str
+
The start date and time for this event.
+
end_date: str
+
The end date and time for this event; must be after start_date.
+
event_type_id: int
+
A valid event type id.
+
roles: list
+
A list of valid role ids that correspond the with the event type.
+
shifts:
+

A list of dicts with shifts formatted as:

+
[
+    {
+     'name': 'Shift 1',
+     'start_time': '12-31-2018T12:00:00',
+     'end_time': '12-31-2018T13:00:00'
+    }
+    {
+     'name': 'Shift 2',
+     'start_time': '12-31-2018T13:00:00',
+     'end_time': '12-31-2018T14:00:00'
+    }
+]
+
+
+
+
description: str
+
An optional description for this Event, no longer than 500 characters.
+
editable: boolean
+
If True, prevents modification of this event by any users other than the user associated with the API key. Setting this to true effectively makes the event read-only in the VAN interface.
+
publicly_viewable: boolean
+
Used by NGP VAN’s website platform to indicate whether this event can be +viewed publicly.
+
location_ids: list
+
A list of location_ids where the event is taking place
+
code_ids: list
+
A list of codes that are applied to this event for organizational purposes. Note +that at most one source code and any number of tags, may be applied to an event.
+
notes: list
+
A list of notes
+
+
+
Returns:
+
+
int
+
The event code.
+
+
+
+
+ +
+
+delete_event(event_id)[source]Âś
+

Delete an event.

+
+
Args:
+
+
event_id: int
+
The event id.
+
+
+
Returns:
+
None
+
+
+ +
+
+add_event_shift(event_id, shift_name, start_time, end_time)[source]Âś
+

Add shifts to an event

+
+
Args:
+
+
event_id: int
+
The event id.
+
shift_name: str
+
The name of the shift
+
start_time: str
+
The start time for the shift (iso8601 formatted date).
+
end_time: str
+
The end time of the shift (iso8601 formatted date).
+
+
+
Returns:
+
+
int
+
The shift id.
+
+
+
+
+ +
+
+get_event_types()[source]Âś
+

Get event types.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

Export Jobs

+
+
+class parsons.ngpvan.van.ExportJobs(van_connection)[source]Âś
+
+
+get_export_job_types()[source]Âś
+

Get export job types

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+export_job_create(list_id, export_type=4, webhookUrl='https://www.nothing.com')[source]Âś
+

Creates an export job

+

Currently, this is only used for exporting saved lists. It is recommended that you use the download_saved_list() method instead.

+
+
Args:
+
+
list_id: int
+
The id of the saved list to export.
+
export_type: int
+
The export type id, which defines the columns to export
+
webhookUrl:
+
A webhook URL to notify as to the status of the export.
+
+
+
Returns:
+
+
dict
+
The export job object
+
+
+
+
+ +
+
+get_export_job(export_job_id)[source]Âś
+

Get an export job.

+
+
Args:
+
+
export_job_id: int
+
The export job id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

File Loading Jobs

+
+
+class parsons.ngpvan.van.FileLoadingJobs(van_connection)[source]Âś
+
+
+create_file_load(file_name, file_url, columns, id_column, id_type, score_id, score_column, delimiter='csv', header=True, quotes=True, description=None, email=None, auto_average=None, auto_tolerance=None)[source]Âś
+
+

Warning

+
+

Deprecated since version 0.7: Use parsons.VAN.upload_scores() instead.

+
+
+

Loads a file. Only used for loading scores at this time. Scores must be +compressed using zip.

+
+
Args:
+
+
file_name: str
+
The name of the file contained in the zip file.
+
file_url: str
+
The url path to directly download the file. Can also be a path to an FTP site.
+
columns: list
+
A list of column names contained in the file.
+
id_column: str
+
The column name of the id column in the file.
+
id_type: str
+
A valid primary key, such as VANID or DWID. Varies by VAN instance.
+
score_id: int
+
The score slot id
+
score_column: str
+
The column holding the score
+
delimiter: str
+
The file delimiter used.
+
email: str
+
A valid email address in which file loading status will be sent.
+
auto_average: float
+
The average of scores to be loaded.
+
auto_tolerance: float
+
The fault tolerance of the VAN calculated average compared to the auto_average. +The tolerance must be less than 10% of the difference between the maximum and +minimum possible acceptable values of the score.
+
+
+
Returns:
+
+
dict
+
The file load id
+
+
+
+
+ +
+
+create_file_load_multi(file_name, file_url, columns, id_column, id_type, score_map, delimiter='csv', header=True, quotes=True, description=None, email=None)[source]Âś
+
+

Warning

+
+

Deprecated since version 0.7: Use parsons.VAN.upload_scores() instead.

+
+
+

An iteration of the file_load() method that allows you to load multiple scores +at the same time.

+
+
Args:
+
+
file_name : str
+
The name of the file contained in the zip file.
+
file_url : str
+
The url path to directly download the file. Can also be a path to an FTP site.
+
columns: list
+
A list of column names contained in the file.
+
id_column : str
+
The column name of the id column in the file.
+
id_type : str
+
A valid primary key, such as VANID or DWID. Varies by VAN instance.
+
score_map : list
+

A list of dicts that adheres to the following syntax

+
[{'score_id' : int,
+  'score_column': str,
+  'auto_average': float,
+  'auto_tolerance': float }]
+
+
+
+
email: str
+
A valid email address in which file loading status will be sent.
+
+
+
Returns:
+
The file load job id
+
+
+ +
+ +
+
+

Folders

+
+

Note

+

A folder must be shared with the user associated with your API key to +be listed.

+
+
+
+class parsons.ngpvan.van.Folders(van_connection)[source]Âś
+
+
+get_folders()[source]Âś
+

Get all folders owned or shared with the API user.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_folder(folder_id)[source]Âś
+

Get a folder owned by or shared with the API user.

+
+
Args:
+
+
folder_id: int
+
The folder id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

LocationsÂś

+
+
+class parsons.ngpvan.van.Locations(van_connection)[source]Âś
+
+
+get_locations(name=None)[source]Âś
+

Get locations.

+
+
Args:
+
+
name: str
+
Filter locations by name.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_location(location_id)[source]Âś
+

Get a location.

+
+
Args:
+
+
location_id: int
+
The location id.
+
+
+
Returns:
+
dict
+
+
+ +
+
+create_location(name, address_line1=None, address_line2=None, city=None, state=None, zip_code=None)[source]Âś
+

Find or create a location. If the location already exists, the existing location id will be returned.

+
+
Args:
+
+
name: str
+
A name for this location, no longer than 50 characters.
+
address_line1: str
+
First line of a street address.
+
address_line2: str
+
Second line of a street address.
+
city: str
+
City or town name.
+
state: str
+
Two or three character state or province code (e.g., MN, ON, NSW, etc.).
+
zip_code: str
+
ZIP, ZIP+4, Postal Code, Post code, etc.
+
Returns:
+
+
int
+
A location id.
+
+
+
+
+
+
+ +
+
+delete_location(location_id)[source]Âś
+

Delete a location.

+
+
Args:
+
+
location_id: int
+
The location id
+
+
+
Returns:
+
None
+
+
+ +
+ +
+
+

Saved ListsÂś

+
+

Note

+

A saved list must be shared with the user associated with your API key to +be listed.

+
+
+
+class parsons.ngpvan.van.SavedLists(van_connection)[source]Âś
+
+
+get_saved_lists(folder_id=None)[source]Âś
+

Get saved lists.

+
+
Args:
+
+
folder_id: int
+
Filter by the id for a VAN folder. If included, returns only +the saved lists in the folder.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_saved_list(saved_list_id)[source]Âś
+

Returns a saved list object.

+
+
Args:
+
+
saved_list_id: int
+
The saved list id.
+
+
+
Returns:
+
dict
+
+
+ +
+
+download_saved_list(saved_list_id)[source]Âś
+

Download the vanids associated with a saved list.

+
+
Args:
+
+
saved_list_id: int
+
The saved list id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+upload_saved_list(tbl, list_name, folder_id, url_type, id_type='vanid', replace=False, **url_kwargs)[source]Âś
+

Upload a saved list. Invalid or unmatched person id records will be ignored. Your api user +must be shared on the target folder.

+
+
Args:
+
+
tbl: parsons.Table
+
A parsons table object containing one column of person ids.
+
list_name: str
+
The saved list name.
+
folder_id: int
+
The folder id where the list will be stored.
+
url_type: str
+
The cloud file storage to use to post the file. Currently only S3.
+
id_type: str
+
The primary key type. The options, beyond vanid are specific to your +instance of VAN.
+
replace: boolean
+
Replace the saved list if it already exists.
+
**url_kwargs: kwargs
+
+
Arguments to configure your cloud storage url type.
+
    +
  • S3 requires a bucket argument and, if not stored as env variables, +the aws_access_key and aws_secret_access_key arguments.
  • +
+
+
+
+
+
+
Returns:
+
+
dict
+
Upload results information, including the number of matched and saved +records in your list.
+
+
+
+
+ +
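For example, a minimal sketch of a saved list upload, assuming the top-level parsons.VAN client (API key read from the VAN_API_KEY env variable); the db name, folder id, list name, and S3 bucket below are hypothetical:

from parsons import VAN, Table

van = VAN(db='MyVoters')  # hypothetical db; API key assumed in VAN_API_KEY

# A one-column table of person ids
tbl = Table([{'vanid': 123456}, {'vanid': 654321}])

van.upload_saved_list(
    tbl,
    list_name='My Canvass Universe',  # hypothetical list name
    folder_id=1234,                   # hypothetical folder id
    url_type='S3',
    replace=True,
    bucket='my-temp-bucket')          # S3 kwarg; AWS keys assumed in env vars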
+ +
+
+

ScoresÂś

+

Prior to loading a score for the first time, you must contact VAN support to request +a score slot.

+
+

Note

+
+
Score Auto Approval
+
Scores can be automatically set to approved through the VAN.upload_scores() +method, allowing you to skip calling VAN.update_score_status(), if the average of +the scores is within the fault tolerance specified by the user. This is only available +to API keys with permission to automatically approve scores.
+
+
+
+
+class parsons.ngpvan.van.Scores(van_connection)[source]Âś
+
+
+get_scores()[source]Âś
+

Get all scores.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_score(score_id)[source]Âś
+

Get an individual score.

+
+
Args:
+
+
score_id: int
+
The score id
+
+
+
Returns:
+
dict
+
+
+ +
+
+get_score_updates(created_before=None, created_after=None, score_id=None)[source]Âś
+

Get score updates.

+
+
Args:
+
+
created_before: str
+
Filter score updates to those created before date. Use “YYYY-MM-DD” +format.
+
created_after: str
+
Filter score updates to those created after date. Use “YYYY-MM-DD” +format.
+
score_id: int
+
Filter score updates to those associated with a given score id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_score_update(score_update_id)[source]Âś
+

Get a score update object

+
+
+
Args:
+
+
score_update_id : int
+
The score update id
+
+
+
Returns:
+
dict
+
+
+
+ +
+
+update_score_status(score_update_id, status)[source]Âś
+

Change the status of a score update object. This endpoint is used to +approve a score loading job.

+
+
Args:
+
+
score_update_id: str
+
The score update id
+
status: str
+
One of ‘pending approval’, ‘approved’, ‘disapproved’
+
+
+
Returns:
+
None
+
+
+ +
+
+upload_scores(tbl, config, url_type, id_type='vanid', email=None, auto_approve=True, approve_tolerance=0.1, **url_kwargs)[source]Âś
+

Upload scores. Use to create or overwrite scores. Multiple score loads +should be configured in a single call. [1]

+
+
Args:
+
+
tbl: object
+
A parsons.Table object. The table must contain the scores, and the first column in the +table must contain the primary key (e.g. vanid).
+
config: list
+

The score configuration. A list of dictionaries in which you specify the following

+
score_column: The name of the column where the score is housed.
+
score_id: The score slot id.
+

Example:

+
[{'score_id': int, 'score_column': str},
+ {'score_id': int, 'score_column': str}]
+
+
+
+
url_type: str
+
The cloud file storage to use to post the file. +See Cloud Storage for more details.
+
email: str
+
An email address to send job load status updates.
+
auto_approve: boolean
+
If the scores are within the expected tolerance of deviation from the +average values provided, then the scores will be automatically approved.
+
approve_tolerance: float
+
The deviation from the average scores allowed in order to automatically +approve the score. Maximum of .1.
+
**url_kwargs: kwargs
+
Arguments to configure your cloud storage url type. See +Cloud Storage for more details.
+
+
+
Returns:
+
+
int
+
The score load job id.
+
+
+
+ + + + + +
[1]NGPVAN asks that you load multiple scores in a single call to reduce the load +on their servers.
+
+ +
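As a rough sketch of a single call loading one score (the score slot id, column names, email, and bucket are hypothetical; assumes VAN_API_KEY and AWS credentials in env variables):

from parsons import VAN, Table

van = VAN(db='MyVoters')  # hypothetical db

# Per the docs above, the first column must be the primary key (e.g. vanid)
tbl = Table([{'vanid': 123456, 'support_score': 72.3}])

config = [{'score_id': 9999, 'score_column': 'support_score'}]

job_id = van.upload_scores(
    tbl,
    config,
    url_type='S3',
    email='me@example.org',   # hypothetical; receives load status updates
    bucket='my-temp-bucket')  # cloud storage kwarg; see Cloud Storage docs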
+ +
+
+

SignupsÂś

+
+
+class parsons.ngpvan.van.Signups(van_connection)[source]Âś
+
+
+get_signups_statuses(event_id=None, event_type_id=None)[source]Âś
+

Get a list of valid signup statuses for a given event type +or event. You must pass one of event_id or event_type_id +but not both.

+
+
Args:
+
+
event_id: int
+
A valid event id.
+
event_type_id: int
+
A valid event type id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_person_signups(vanid)[source]Âś
+

Get the signup history of a person.

+
+
Args:
+
+
vanid: int
+
A valid vanid associated with a person.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_event_signups(event_id)[source]Âś
+

Get the signup history of an event.

+
+
Args:
+
+
event_id: int
+
A valid event_id associated with an event
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_signup(event_signup_id)[source]Âś
+

Get a single signup object.

+
+
Args:
+
+
event_signup_id: int
+
A valid event_signup_id associated with a signup.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+create_signup(vanid, event_id, shift_id, role_id, status_id, location_id)[source]Âś
+

Create a new signup for an event.

+
+
Args:
+
+
vanid: int
+
A valid vanid of the person to signup for the event.
+
event_id: int
+
A valid event_id to associate the person with the event
+
shift_id: int
+
A shift_id, associated with the event, to assign to the person
+
role_id: int
+
A role_id, associated with the event, to assign to the person
+
status_id: int
+
A status_id for the person
+
location_id: int
+
A location_id for the event
+
+
+
Returns:
+
+
int
+
The event signup id
+
+
+
+
+ +
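A minimal sketch of creating a signup (all ids below are hypothetical; valid status ids can be looked up with get_signups_statuses()):

from parsons import VAN

van = VAN(db='EveryAction')  # hypothetical db

signup_id = van.create_signup(
    vanid=123456,
    event_id=1111,
    shift_id=2222,
    role_id=3333,
    status_id=4,
    location_id=5555)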
+
+update_signup(event_signup_id, shift_id=None, role_id=None, status_id=None, location_id=None)[source]Âś
+

Update a signup object. All of the kwargs will update the values associated +with them.

+
+
Args:
+
+
event_signup_id: int
+
A valid event signup id
+
shift_id: int
+
The shift_id to update
+
role_id: int
+
The role_id to update
+
status_id: int
+
The status_id to update
+
location_id: int
+
The location_id to update
+
+
+
Returns:
+
None
+
+
+ +
+
+delete_signup(event_signup_id)[source]Âś
+

Delete a signup object

+
+
Args:
+
+
event_signup_id: int
+
A valid event signup id
+
+
+
Returns:
+
None
+
+
+ +
+ +
+
+

Supporter GroupsÂś

+
+
+class parsons.ngpvan.van.SupporterGroups(van_connection)[source]Âś
+
+
+get_supporter_groups()[source]Âś
+

Get supporter groups.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_supporter_group(supporter_group_id)[source]Âś
+

Get a supporter group.

+
+
Args:
+
+
supporter_group_id: int
+
The supporter group id.
+
+
+
Returns:
+
dict
+
+
+ +
+
+create_supporter_group(name, description)[source]Âś
+

Create a new supporter group.

+
+
Args:
+
+
name: str
+
The name of the supporter group. 100 character limit
+
description: str
+
Optional; A description of the supporter group. 200 character limit.
+
+
+
Returns:
+
Parsons Table with the newly created supporter group id, name +and description.
+
+
+ +
+
+delete_supporter_group(supporter_group_id)[source]Âś
+

Delete a supporter group.

+
+
Args:
+
+
supporter_group_id: int
+
The supporter group id
+
+
+
Returns:
+
None
+
+
+ +
+
+add_person_supporter_group(supporter_group_id, vanid)[source]Âś
+

Add a person to a supporter group

+
+
Args:
+
+
supporter_group_id: int
+
The supporter group id
+
vanid: int
+
The vanid of the person to apply
+
+
+
Returns:
+
None
+
+
+ +
+
+delete_person_supporter_group(supporter_group_id, vanid)[source]Âś
+

Remove a person from a supporter group

+
+
Args:
+
+
supporter_group_id: int
+
The supporter group id
+
vanid: int
+
The vanid of the person to remove
+
+
+
Returns:
+
None
+
+
+ +
+ +
+
+

Survey QuestionsÂś

+
+
+class parsons.ngpvan.van.SurveyQuestions(van_connection)[source]Âś
+
+
+get_survey_questions(statuses=['Active'], name=None, sq_type=None, question=None, cycle=None)[source]Âś
+

Get survey questions.

+
+
Args:
+
+
statuses: list
+
Filter to a list of statuses of survey questions. One or more of Active, +Archived, and Inactive.
+
name: str
+
Filter to survey questions whose names begin with the input.
+
sq_type: str
+
Filter to survey questions of a given type.
+
question: str
+
Filter to survey questions with script questions that contain the given input.
+
cycle: str
+
Filter to survey questions with the given cycle. A year in the format “YYYY”.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_survey_question(survey_question_id)[source]Âś
+

Get a survey question.

+
+
Args:
+
+
survey_question_id: int
+
The survey question id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+apply_survey_response(id, survey_question_id, survey_response_id, id_type='vanid', result_code_id=None, contact_type_id=None, input_type_id=None, date_canvassed=None)[source]Âś
+

Apply a single survey response to a person.

+
+
Args:
+
+
id: str
+
A valid person id
+
survey_question_id: int
+
A valid survey question id
+
survey_response_id: int
+
A valid survey response id
+
id_type: str
+
A known person identifier type available on this VAN instance +such as dwid
+
result_code_id : int
+
Optional; Specifies the result code of the response. If +not included, responses must be specified. Conversely, if +responses are specified, result_code_id must be null. Valid ids +can be found by using the get_canvass_responses_result_codes() method.
+
contact_type_id : int
+
Optional; A valid contact type id
+
input_type_id : int
+
Optional; Defaults to 11 (API Input)
+
date_canvassed : str
+
Optional; ISO 8601 formatted date. Defaults to today's date.
+
+
+
+
+ +
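For instance, a hedged sketch of applying one response by vanid (the question and response ids are hypothetical):

from parsons import VAN

van = VAN(db='MyVoters')  # hypothetical db

van.apply_survey_response(
    id=123456,               # a valid person id
    survey_question_id=7777,
    survey_response_id=8888,
    id_type='vanid')         # input_type_id defaults to 11 (API Input)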
+ +
+
+

TargetsÂś

+
+
+class parsons.ngpvan.van.Targets(van_connection)[source]Âś
+
+
+get_targets()[source]Âś
+

Get targets.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_target(target_id)[source]Âś
+

Get a single target.

+
+
Args:
+
+
target_id : int
+
The target id.
+
+
+
Returns:
+
+
dict
+
The target
+
+
+
+
+ +
+
+get_target_export(export_job_id)[source]Âś
+

Get the status of a specific target export job.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+create_target_export(target_id, webhook_url=None)[source]Âś
+

Create a new target export job.

+
+
Args:
+
+
target_id : int
+
The target id for which the export job is created.
+
webhook_url: str
+
Optional; A webhook url to be notified as to the status of the export.
+
+
+
Returns:
+
+
dict
+
The target export job ID
+
+
+
+
+ +
+ +
+
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/notifications.html b/docs/html/notifications.html new file mode 100644 index 0000000000..3f51feb21b --- /dev/null +++ b/docs/html/notifications.html @@ -0,0 +1,600 @@ + + + + + + + + + + + Notifications — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

NotificationsÂś

+
+

SlackÂś

+
+

OverviewÂś

+

The Slack module leverages the Slack API and provides a way to easily send +notifications through Slack. It is recommended that you reference the +Slack API documentation for additional details and +information.

+
+

Note

+
+
API Tokens
+
    +
  • Slack API Tokens are required to use this module. To obtain an API +Token, create a Slack App associated +with the desired Slack workspace. Once you create the app, navigate +to ‘OAuth & Permissions’ and add the following OAuth scopes:

    +

    channels:read, users:read, chat:write, and files:write

    +

    You can now install the Slack App, which will produce an API Token. +Note that you can change the scopes any time, but you must reinstall +the app each time (your API Token will stay the same).

    +
  • +
  • Slack has rate limits on all its endpoints.

    +
  • +
+
+
+
+
+
+
+
+

QuickStartÂś

+

To call the Slack class you can either store the API Token as an environment +variable SLACK_API_TOKEN or pass it in as an argument.

+
from parsons import Slack
+
+slack = Slack() # Initiate class via environment variable api token
+
+slack = Slack(api_key='my-api-tkn') # Pass api token directly
+
+
+

You can then send messages:

+
from parsons import Slack
+
+slack = Slack()
+
+# send a simple message
+slack.message_channel("my_channel", "Hello from python script")
+
+# share a file
+slack.upload_file(["channel_1", "channel_2"], "my_slack_file.txt")
+
+
+
+
+

APIÂś

+
+
+class parsons.Slack(api_key=None)[source]Âś
+
+
+channels(fields=['id', 'name'], exclude_archived=False, types=['public_channel'])[source]Âś
+

Return a list of all channels in a Slack team.

+
+
Args:
+
+
fields: list
+
A list of the fields to return. By default, only the channel +id and name are returned. See +https://api.slack.com/methods/conversations.list for a full +list of available fields. Notes: nested fields are unpacked.
+
exclude_archived: bool
+
Set to True to exclude archived channels from the list. +Default is false.
+
types: list
+
Mix and match channel types by providing a list of any +combination of public_channel, private_channel, +mpim (aka group messages), or im (aka 1-1 messages).
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+users(fields=['id', 'name', 'deleted', 'profile_real_name_normalized', 'profile_email'])[source]Âś
+

Return a list of all users in a Slack team.

+
+
Args:
+
+
fields: list
+
A list of the fields to return. By default, only the user +id and name and deleted status are returned. See +https://api.slack.com/methods/users.list for a full list of +available fields. Notes: nested fields are unpacked.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+classmethod message(channel, text, webhook=None, parent_message_id=None)[source]Âś
+

Send a message to a Slack channel with a webhook instead of an api_key. +You might not have the full-access API key but still want to notify a channel.

+
Args:

+
+
+
channel: str
+
The name or id of a public_channel, a private_channel, or +an im (aka 1-1 message).
+
text: str
+
Text of the message to send.
+
webhook: str
+
If you have a webhook url instead of an api_key +Looks like: https://hooks.slack.com/services/Txxxxxxx/Bxxxxxx/Dxxxxxxx
+
parent_message_id: str
+
The ts value of the parent message. If used, this will thread the message.
+
+
+
+ +
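For example, a sketch of a webhook-only notification; the channel name is hypothetical and the webhook URL is a placeholder of the form shown above:

from parsons import Slack

# No api_key needed; message() is a classmethod
Slack.message(
    channel='#alerts',  # hypothetical channel
    text='Nightly sync finished',
    webhook='https://hooks.slack.com/services/Txxxxxxx/Bxxxxxx/Dxxxxxxx')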
+
+message_channel(channel, text, as_user=False, parent_message_id=None)[source]Âś
+

Send a message to a Slack channel

+
+
Args:
+
+
channel: str
+
The name or id of a public_channel, a private_channel, or +an im (aka 1-1 message).
+
text: str
+
Text of the message to send.
+
as_user: bool
+
Pass true to post the message as the authenticated user, +instead of as a bot. Defaults to false. See +https://api.slack.com/methods/chat.postMessage#authorship for +more information about Slack authorship.
+
parent_message_id: str
+
The ts value of the parent message. If used, this will thread the message.
+
+
+
Returns:
+
+
dict:
+
A response json
+
+
+
+
+ +
+
+upload_file(channels, filename, filetype=None, initial_comment=None, title=None, is_binary=False)[source]Âś
+

Upload a file to Slack channel(s).

+
+
Args:
+
+
channels: list
+
The list of channel names or IDs where the file will be shared.
+
filename: str
+
The path to the file to be uploaded.
+
filetype: str
+
A file type identifier. If None, type will be inferred based on the +file extension. This is used to determine what fields are +available for that object. See https://api.slack.com/types/file +for a list of valid types and for more information about the +file object.
+
initial_comment: str
+
The text of the message to send along with the file.
+
title: str
+
Title of the file to be uploaded.
+
is_binary: bool
+
If True, open this file in binary mode. This is needed if +uploading binary files. Defaults to False.
+
+
+
Returns:
+
+
dict:
+
A response json
+
+
+
+
+ +
+ +
+

+
+
+

GmailÂś

+
+

OverviewÂś

+

The Gmail module leverages the Gmail API and provides a way to easily send +notifications through email. It is recommended that you reference the +Gmail API documentation for +additional details and information.

+
+

Note

+
+
Credentials and token
+
    +
  • Credentials are required to use the class
  • +
  • You will need to pass in the path to the credentials and to where a +generated token will be saved. Typically you’ll get the credentials from +the Google Developer Console (look for the “Gmail API”).
  • +
+
+
+
+
+

Note

+
+
6MB Attachment Size Limit
+
    +
  • Currently there is a limit of 6MB when sending attachments.
  • +
+
+
+
+
+
+
+
+

QuickStartÂś

+

To call the Gmail class you will need to pass in the path to a +credentials.json file and the path to a token.json file.

+
from parsons import Gmail
+
+gmail = Gmail(
+   creds_path="~/secret_location/credentials.json",
+   token_path="~/secret_location/token.json")
+
+
+

The easiest way to send a message:

+
gmail.send_email(
+  "sender@email.com",
+  "recipient@email.com",
+  "The Subject",
+  "This is the text body of the email")
+
+
+

The current version also supports sending html emails and emails with +attachments.

+
gmail.send_email(
+  "sender@email.com",
+  "recipient@email.com",
+  "An html email with attachments",
+  "This is the text body of the email",
+  html="<p>This is the html part of the email</p>",
+  files=['file1.txt', 'file2.txt'])
+
+
+

Additionally, you can create raw email messages and send them. See below for +more details.

+
+
+

APIÂś

+
+
+class parsons.Gmail(creds_path=None, token_path=None, user_id='me')[source]Âś
+

Create a Gmail object, for sending emails.

+
+
Args:
+
+
creds_path: str
+
The path to the credentials.json file.
+
token_path: str
+
The path to the token.json file.
+
user_id: str
+
Optional; Sender email address. Defaults to the special value +“me” which is used to indicate the authenticated user.
+
+
+
+
+
+send_email(sender, to, subject, message_text, message_html=None, files=None)Âś
+

Send an email message.

+
+
Args:
+
+
sender: str
+
Email address of the sender.
+
to: str or list
+
Email address(es) of the receiver(s). Must be in correct email +string syntax. For example, name@email.com or +“Name” <email@email.com>.
+
subject: str
+
The subject of the email message.
+
message_text: str
+
The text of the email message.
+
message_html: str
+
The html formatted text of the email message. If omitted, the +email is sent with a text-only body.
+
files: str or list
+
The path to the file(s) to be attached.
+
+
+
Returns:
+
None
+
+
+ +
+ +
+
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/objects.inv b/docs/html/objects.inv new file mode 100644 index 0000000000..925d55890f Binary files /dev/null and b/docs/html/objects.inv differ diff --git a/docs/html/p2a.html b/docs/html/p2a.html new file mode 100644 index 0000000000..de69e8a693 --- /dev/null +++ b/docs/html/p2a.html @@ -0,0 +1,494 @@ + + + + + + + + + + + Phone2Action — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

Phone2ActionÂś

+
+

OverviewÂś

+

Phone2Action is a digital advocacy tool used by progressive organizations. This class +allows you to interact with the tool by leveraging their API.

+
+

Note

+
+
Authentication
+
You will need to email Phone2Action to request credentials to access the API. The credentials consist of an app ID and an app key.
+
+
+
+
+

Quick StartÂś

+

To instantiate the Phone2Action class, you can either pass in the app ID and app key as arguments or set the +PHONE2ACTION_APP_ID and PHONE2ACTION_APP_KEY environmental variables.

+
from parsons import Phone2Action
+
+# Instantiate the class using environment variables
+p2a = Phone2Action()
+
+# Get all advocates updated in the last day
+import datetime
+today = datetime.datetime.utcnow()
+yesterday = today - datetime.timedelta(days=1)
+
+# get_advocates returns a dictionary that maps the advocate data (e.g. phones) to a parsons
+# Table with the data for each advocate
+advocates_data = p2a.get_advocates(updated_since=yesterday)
+
+# For all of our advocates' phone numbers, opt them into SMS
+for phone in advocates_data['phones']:
+    phone_number = phone['phones_address']
+    # Only update phone numbers that aren't already subscribed
+    if not phone['subscribed']:
+        p2a.update_advocate(phone['advocate_id'], phone=phone_number, sms_opt_in=True)
+
+
+
+
+

APIÂś

+
+
+class parsons.Phone2Action(app_id=None, app_key=None)[source]Âś
+

Instantiate Phone2Action Class

+
+
Args:
+
+
app_id: str
+
The Phone2Action provided application id. Not required if PHONE2ACTION_APP_ID +env variable set.
+
app_key: str
+
The Phone2Action provided application key. Not required if PHONE2ACTION_APP_KEY +env variable set.
+
+
+
Returns:
+
Phone2Action Class
+
+
+
+get_advocates(state=None, campaign_id=None, updated_since=None, page=None)[source]Âś
+

Return advocates (person records).

+

If no page is specified, the method will automatically paginate through the available +advocates.

+
+
Args:
+
+
state: str
+
Filter by US postal abbreviation for a state +or territory e.g., “CA” “NY” or “DC”
+
campaign_id: int
+
Filter to specific campaign
+
updated_since: str or int or datetime
+
Fetch all advocates updated since the date provided; this can be a datetime +object, a UNIX timestamp, or a date string (ex. ‘2014-01-05 23:59:43’)
+
page: int
+
Page number of data to fetch; if this is specified, call will only return one +page.
+
+
+
Returns:
+
+
A dict of parsons tables:
+
    +
  • emails
  • +
  • phones
  • +
  • memberships
  • +
  • tags
  • +
  • ids
  • +
  • fields
  • +
  • advocates
  • +
+
+
+
+
+
+ +
+
+get_campaigns(state=None, zip=None, include_generic=False, include_private=False, include_content=True)[source]Âś
+

Returns a list of campaigns

+
+
Args:
+
+
state: str
+
Filter by US postal abbreviation for a state or territory e.g., “CA” “NY” or “DC”
+
zip: int
+
Filter by 5 digit zip code
+
include_generic: boolean
+
When filtering by state or ZIP code, include unrestricted campaigns
+
include_private: boolean
+
If true, will include private campaigns in results
+
include_content: boolean
+
If true, include campaign content fields, which may vary. This may cause +sync errors.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+create_advocate(campaigns, first_name=None, last_name=None, email=None, phone=None, address1=None, address2=None, city=None, state=None, zip5=None, sms_optin=None, email_optin=None, sms_optout=None, email_optout=None, **kwargs)[source]Âś
+

Create an advocate.

+

If you want to opt an advocate into or out of SMS / email campaigns, you must provide +the email address or phone number (accordingly).

+

The list of arguments only partially covers the fields that can be set on the advocate. +For a complete list of fields that can be updated, see +the Phone2Action API documentation.

+
+
Args:
+
+
campaigns: list
+
The ID(s) of campaigns to add the advocate to
+
first_name: str
+
Optional; The first name of the advocate
+
last_name: str
+
Optional; The last name of the advocate
+
email: str
+
Optional; An email address to add for the advocate. One of email or phone +is required.
+
phone: str
+
Optional; A phone # to add for the advocate. One of email or phone is +required.
+
address1: str
+
Optional; The first line of the advocates’ address
+
address2: str
+
Optional; The second line of the advocates’ address
+
city: str
+
Optional; The city of the advocates address
+
state: str
+
Optional; The state of the advocates address
+
zip5: str
+
Optional; The 5 digit Zip code of the advocate
+
sms_optin: boolean
+
Optional; Whether to opt the advocate into receiving text messages; an SMS +confirmation text message will be sent. You must provide values for the phone +and campaigns arguments.
+
email_optin: boolean
+
Optional; Whether to opt the advocate into receiving emails. You must provide +values for the email and campaigns arguments.
+
sms_optout: boolean
+
Optional; Whether to opt the advocate out of receiving text messages. You must +provide values for the phone and campaigns arguments. Once an advocate is +opted out, they cannot be opted back in.
+
email_optout: boolean
+
Optional; Whether to opt the advocate out of receiving emails. You must +provide values for the email and campaigns arguments. Once an advocate is +opted out, they cannot be opted back in.
+
**kwargs:
+
Additional fields on the advocate to update
+
+
+
Returns:
+
The int ID of the created advocate.
+
+
+ +
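A minimal sketch of creating an advocate and opting them into SMS (the campaign id and contact details are hypothetical; app credentials assumed in the PHONE2ACTION_APP_ID and PHONE2ACTION_APP_KEY env variables):

from parsons import Phone2Action

p2a = Phone2Action()

advocate_id = p2a.create_advocate(
    campaigns=[1234],       # hypothetical campaign id
    first_name='Alice',
    last_name='Lopez',
    phone='5555550123',
    sms_optin=True)         # per the docs above, requires phone and campaigns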
+
+update_advocate(advocate_id, campaigns=None, email=None, phone=None, sms_optin=None, email_optin=None, sms_optout=None, email_optout=None, **kwargs)[source]Âś
+

Update the fields of an advocate.

+

If you want to opt an advocate into or out of SMS / email campaigns, you must provide +the email address or phone number along with a list of campaigns.

+

The list of arguments only partially covers the fields that can be updated on the advocate. +For a complete list of fields that can be updated, see +the Phone2Action API documentation.

+
+
Args:
+
+
advocate_id: integer
+
The ID of the advocate being updated
+
campaigns: list
+
Optional; The ID(s) of campaigns to add the user to
+
email: str
+
Optional; An email address to add for the advocate (or to use when opting in/out)
+
phone: str
+
Optional; A phone # to add for the advocate (or to use when opting in/out)
+
sms_optin: boolean
+
Optional; Whether to opt the advocate into receiving text messages; an SMS +confirmation text message will be sent. You must provide values for the phone +and campaigns arguments.
+
email_optin: boolean
+
Optional; Whether to opt the advocate into receiving emails. You must provide +values for the email and campaigns arguments.
+
sms_optout: boolean
+
Optional; Whether to opt the advocate out of receiving text messages. You must +provide values for the phone and campaigns arguments. Once an advocate is +opted out, they cannot be opted back in.
+
email_optout: boolean
+
Optional; Whether to opt the advocate out of receiving emails. You must +provide values for the email and campaigns arguments. Once an advocate is +opted out, they cannot be opted back in.
+
**kwargs:
+
Additional fields on the advocate to update
+
+
+
+
+ +
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/redshift.html b/docs/html/redshift.html new file mode 100644 index 0000000000..b71348ba5c --- /dev/null +++ b/docs/html/redshift.html @@ -0,0 +1,962 @@ + + + + + + + + + + + Redshift — Parsons 0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

RedshiftÂś

+
+

OverviewÂś

+

The Redshift class allows you to interact with an Amazon Redshift relational database. The Redshift Connector utilizes the psycopg2 python package to connect to the database.

+

In order to instantiate the class you must pass valid kwargs or store the following +environmental variables:

+
    +
  • 'REDSHIFT_USERNAME'
  • +
  • 'REDSHIFT_PASSWORD'
  • +
  • 'REDSHIFT_HOST'
  • +
  • 'REDSHIFT_DB'
  • +
  • 'REDSHIFT_PORT'
  • +
+
+

Note

+
+
S3 Credentials
+
Redshift only allows data to be copied to the database via S3. As such, you need to include AWS +S3 credentials in your copy methods or, better yet, store them as environmental variables. +In addition, you’ll need to provide the env var S3_TEMP_BUCKET, which is the bucket name used +for storing data en route to Redshift.
+
Whitelisting
+
Remember to ensure that the IP address from which you are connecting has been whitelisted.
+
+
+
+
+

QuickstartÂś

+

Query the Database

+
from parsons import Redshift
+rs = Redshift()
+table = rs.query('select * from tmc_scratch.test_data')
+
+
+

Copy a Parsons Table to the Database

+
from parsons import Redshift
+rs = Redshift()
+table = rs.copy(tbl, 'tmc_scratch.test_table', if_exists='replace')
+
+
+

All of the standard copy options can be passed as kwargs. See the copy() method for all +options.

+
+
+

Redshift ClassÂś

+
+
+class parsons.Redshift(username=None, password=None, host=None, db=None, port=None, timeout=10, s3_temp_bucket=None)[source]Âś
+
+
+connection()[source]Âś
+

Generate a Redshift connection. +The connection is set up as a python “context manager”, so it will be closed +automatically (and all queries committed) when the connection goes out of scope.

+

When using the connection, make sure to put it in a with block (necessary for +any context manager):

with rs.connection() as conn:
    ...

+
+
Returns:
+
Psycopg2 connection object
+
+
+ +
+
+query(sql, parameters=None)[source]Âś
+

Execute a query against the Redshift database. Will return None +if the query returns zero rows.

+

To include python variables in your query, it is recommended to pass them as parameters, +following the psycopg style (http://initd.org/psycopg/docs/usage.html#passing-parameters-to-sql-queries).

+

For example…

+

# Note that the name contains a quote, which could break your query if not
# escaped properly.
name = "Beatrice O'Brady"
sql = "SELECT * FROM my_table WHERE name = %s"
rs.query(sql, parameters=[name])

+

An example of passing a list of values…

+

names = ["Allen Smith", "Beatrice O'Brady", "Cathy Thompson"]
placeholders = ', '.join('%s' for item in names)
sql = f"SELECT * FROM my_table WHERE name IN ({placeholders})"
rs.query(sql, parameters=names)

+

Using the parameters argument ensures that values are escaped properly, and avoids SQL +injection attacks.

+
+
Args:
+
+
sql: str
+
A valid SQL statement
+
parameters: list
+
A list of python variables to be converted into SQL values in your query
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+query_with_connection(sql, connection, parameters=None, commit=True)[source]Âś
+

Execute a query against the Redshift database, with an existing connection. +Useful for batching queries together. Will return None if the query +returns zero rows.

+
+
Args:
+
+
sql: str
+
A valid SQL statement
+
connection: obj
+
A connection object obtained from redshift.connection()
+
parameters: list
+
A list of python variables to be converted into SQL values in your query
+
commit: boolean
+
Whether to commit the transaction immediately. If False the transaction will +be committed when the connection goes out of scope and is closed (or you can +commit manually with connection.commit()).
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
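A sketch of batching queries on one connection (the table names are hypothetical; Redshift credentials assumed in the REDSHIFT_* env variables listed above):

from parsons import Redshift

rs = Redshift()

with rs.connection() as conn:
    # Both statements run in the same transaction; the commit happens when
    # the with block exits
    rs.query_with_connection(
        'create temp table t as select 1 as x', conn, commit=False)
    tbl = rs.query_with_connection('select * from t', conn, commit=False)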
+
+copy_s3(table_name, bucket, key, manifest=False, data_type='csv', csv_delimiter=', ', compression=None, if_exists='fail', max_errors=0, distkey=None, sortkey=None, padding=None, varchar_max=None, statupdate=True, compudate=True, ignoreheader=1, acceptanydate=True, dateformat='auto', timeformat='auto', emptyasnull=True, blanksasnull=True, nullas=None, acceptinvchars=True, truncatecolumns=False, aws_access_key_id=None, aws_secret_access_key=None)[source]Âś
+

Copy a file from s3 to Redshift.

+
+
Args:
+
+
table_name: str
+
The table name and schema (tmc.cool_table) to point the file.
+
bucket: str
+
The s3 bucket where the file or manifest is located.
+
key: str
+
The key of the file or manifest in the s3 bucket.
+
manifest: boolean
+
If True, the key is treated as a manifest file that lists the data files to load.
+
data_type: str
+
The data type of the file. Only csv supported currently.
+
csv_delimiter: str
+
The delimiter of the csv. Only relevant if data_type is csv.
+
compression: str
+
If specified (gzip), will attempt to decompress the file.
+
if_exists: str
+
If the table already exists, either fail, append, drop +or truncate the table.
+
max_errors: int
+
The maximum number of rows that can error and be skipped before +the copy job fails.
+
distkey: str
+
The column name of the distkey
+
sortkey: str
+
The column name of the sortkey
+
padding: float
+
A percentage padding to add to varchar columns if creating a new table. This is +helpful to add a buffer for future copies in which the data might be wider.
+
varchar_max: list
+
A list of columns in which to set the width of the varchar column to 65,535 +characters.
+
statupdate: boolean
+
Governs automatic computation and refresh of optimizer statistics at the end +of a successful COPY command.
+
compudate: boolean
+
Controls whether compression encodings are automatically applied during a COPY.
+
ignoreheader: int
+
The number of header rows to skip. Ignored if data_type is json.
+
acceptanydate: boolean
+
Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be +loaded without generating an error.
+
emptyasnull: boolean
+
Indicates that Amazon Redshift should load empty char and varchar fields +as NULL.
+
blanksasnull: boolean
+
Loads blank varchar fields, which consist of only white space characters, +as NULL.
+
nullas: str
+
Loads fields that match string as NULL
+
acceptinvchars: boolean
+
Enables loading of data into VARCHAR columns even if the data contains +invalid UTF-8 characters.
+
dateformat: str
+
Set the date format. Defaults to auto.
+
timeformat: str
+
Set the time format. Defaults to auto.
+
truncatecolumns: boolean
+
If the table already exists, truncates data in columns to the appropriate number +of characters so that it fits the column specification. Applies only to columns +with a VARCHAR or CHAR data type, and rows 4 MB or less in size.
+
aws_access_key_id:
+
An AWS access key granted to the bucket where the file is located. Not required +if keys are stored as environmental variables.
+
aws_secret_access_key:
+
An AWS secret access key granted to the bucket where the file is located. Not +required if keys are stored as environmental variables.
+
+
+
Returns:
+
+
Parsons Table or None
+
See Parsons Table for output options.
+
+
+
+
+ +
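For example, a hedged sketch of copying a CSV from S3 (the bucket, key, and table names are hypothetical; AWS keys assumed in env variables):

from parsons import Redshift

rs = Redshift()

rs.copy_s3(
    'tmc_scratch.contacts',      # destination schema.table
    bucket='my-bucket',
    key='exports/contacts.csv',
    if_exists='drop',
    ignoreheader=1)              # skip the CSV header row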
+
+copy(table_obj, table_name, if_exists='fail', max_errors=0, distkey=None, sortkey=None, padding=None, statupdate=False, compudate=True, acceptanydate=True, emptyasnull=True, blanksasnull=True, nullas=None, acceptinvchars=True, dateformat='auto', timeformat='auto', varchar_max=None, truncatecolumns=False, aws_access_key_id=None, aws_secret_access_key=None)[source]Âś
+

Copy a parsons table object to Redshift.

+
+
Args:
+
+
table_obj: obj
+
A Parsons Table.
+
table_name: str
+
The table name and schema (tmc.cool_table) to point the file.
+
if_exists: str
+
If the table already exists, either fail, append, drop +or truncate the table.
+
max_errors: int
+
The maximum number of rows that can error and be skipped before +the copy job fails.
+
distkey: str
+
The column name of the distkey
+
sortkey: str
+
The column name of the sortkey
+
padding: float
+
A percentage padding to add to varchar columns if creating a new table. This is +helpful to add a buffer for future copies in which the data might be wider.
+
varchar_max: list
+
A list of columns in which to set the width of the varchar column to 65,535 +characters.
+
statupdate: boolean
+
Governs automatic computation and refresh of optimizer statistics at the end +of a successful COPY command.
+
compudate: boolean
+
Controls whether compression encodings are automatically applied during a COPY.
+
acceptanydate: boolean
+
Allows any date format, including invalid formats such as 00/00/00 00:00:00, to be +loaded without generating an error.
+
emptyasnull: boolean
+
Indicates that Amazon Redshift should load empty char and varchar fields +as NULL.
+
blanksasnull: boolean
+
Loads blank varchar fields, which consist of only white space characters, +as NULL.
+
nullas: str
+
Loads fields that match string as NULL
+
acceptinvchars: boolean
+
Enables loading of data into VARCHAR columns even if the data contains +invalid UTF-8 characters.
+
dateformat: str
+
Set the date format. Defaults to auto.
+
timeformat: str
+
Set the time format. Defaults to auto.
+
truncatecolumns: boolean
+
If the table already exists, truncates data in columns to the appropriate number +of characters so that it fits the column specification. Applies only to columns +with a VARCHAR or CHAR data type, and rows 4 MB or less in size.
+
aws_access_key_id:
+
An AWS access key granted to the bucket where the file is located. Not required +if keys are stored as environmental variables.
+
aws_secret_access_key:
+
An AWS secret access key granted to the bucket where the file is located. Not +required if keys are stored as environmental variables.
+
+
+
Returns:
+
+
Parsons Table or None
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+unload(sql, bucket, key_prefix, manifest=True, header=True, compression='gzip', add_quotes=True, null_as=None, escape=True, allow_overwrite=True, parallel=True, max_file_size='6.2 GB', aws_region=None, aws_access_key_id=None, aws_secret_access_key=None)[source]Âś
+

Unload Redshift data to S3 Bucket. This is a more efficient method than running a query +to export data as it can export in parallel and directly into an S3 bucket. Consider +using this for data of 10MM or more rows.

+
+
Args:
+
+
sql: str
+
The SQL string to execute to generate the data to unload.
+
bucket: str
+
The destination S3 bucket
+
key_prefix: str
+
The prefix of the key names that will be written
+
manifest: boolean
+
Creates a manifest file that explicitly lists details for the data files +that are created by the UNLOAD process.
+
header: boolean
+
Adds a header line containing column names at the top of each output file.
+
compression: str
+
One of gzip, bzip2 or None. Unloads data to one or more compressed +files per slice. Each resulting file is appended with a .gz or .bz2 extension.
+
add_quotes: boolean
+
Places quotation marks around each unloaded data field, so that Amazon Redshift +can unload data values that contain the delimiter itself.
+
null_as: str
+
Specifies a string that represents a null value in unload files. If this option is +not specified, null values are unloaded as zero-length strings for delimited output.
+
escape: boolean
+
For CHAR and VARCHAR columns in delimited unload files, an escape character (\) is +placed before every linefeed, carriage return, escape character and delimiter.
+
allow_overwrite: boolean
+
If True, will overwrite existing files, including the manifest file. If False +will fail.
+
parallel: boolean
+
By default, UNLOAD writes data in parallel to multiple files, according to the number +of slices in the cluster. The default option is ON or TRUE. If PARALLEL is OFF or +FALSE, UNLOAD writes to one or more data files serially, sorted absolutely according +to the ORDER BY clause, if one is used.
+
max_file_size: str
+
The maximum size of files UNLOAD creates in Amazon S3. Specify a decimal value between +5 MB and 6.2 GB.
+
aws_region: str
+
The AWS Region where the target Amazon S3 bucket is located. REGION is required for +UNLOAD to an Amazon S3 bucket that is not in the same AWS Region as the Amazon Redshift +cluster.
+
aws_access_key_id:
+
An AWS access key granted to the bucket where the file is located. Not required +if keys are stored as environmental variables.
+
aws_secret_access_key:
+
An AWS secret access key granted to the bucket where the file is located. Not +required if keys are stored as environmental variables.
+
+
+ +
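A sketch of a gzip-compressed unload (the bucket and prefix are hypothetical):

from parsons import Redshift

rs = Redshift()

rs.unload(
    'select * from tmc_scratch.big_table',  # hypothetical table
    bucket='my-bucket',
    key_prefix='unloads/big_table_',
    compression='gzip',
    allow_overwrite=True)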
+
+table_exists(table_name, view=True)[source]Âś
+

Check if a table exists in the database.

+
+
Args:
+
+
table_name: str
+
The table name and schema (tmc.cool_table) to point the file.
+
view: boolean
+
Check to see if a view exists by the same name
+
+
+
Returns:
+
+
boolean
+
True if the table exists and False if it does not.
+
+
+
+
+ +
+
+generate_manifest(buckets, aws_access_key_id=None, aws_secret_access_key=None, mandatory=True, prefix=None, manifest_bucket=None, manifest_key=None, path=None)[source]Âś
+

Given a list of S3 buckets, generate a manifest file (JSON format). A manifest file +allows you to copy multiple files into a single table at once. Once the manifest is +generated, you can pass it with the copy_s3() method.

+

AWS keys are not required if AWS_ACCESS_KEY_ID and +AWS_SECRET_ACCESS_KEY environmental variables set.

+

Args:

+
+
+
buckets: list or str
+
A list of buckets or single bucket from which to generate manifest
+
aws_access_key_id: str
+
AWS access key id to access S3 bucket
+
aws_secret_access_key: str
+
AWS secret access key to access S3 bucket
+
mandatory: boolean
+
The mandatory flag indicates whether the Redshift COPY should +terminate if the file does not exist.
+
prefix: str
+
Optional filter for key prefixes
+
manifest_bucket: str
+
Optional bucket to write manifest file.
+
manifest_key: str
+
Optional key name for S3 bucket to write file
+
+
+
+
Returns:
+
dict of manifest
+
+
+ +
+
+upsert(table_obj, target_table, primary_key, vacuum=True, distinct_check=True)[source]Âś
+

Preform an upsert on an existing table. An upsert is a function in which records +in a table are updated and inserted at the same time. Unlike other SQL databases, +it does not exist natively in Redshift.

+
+
Args:
+
+
table_obj: obj
+
A Parsons table object
+
target_table: str
+
The schema and table name to upsert
+
primary_key: str
+
The primary key column of the target table
+
vacuum: boolean
+
Re-sorts rows and reclaims space in the specified table. You must be a table owner +or super user to effectively vacuum a table; however, the method will not fail +if you lack these privileges.
+
distinct_check: boolean
+
Check if the primary key column is distinct. Raise error if not.
+
+
+
+
+ +
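A minimal upsert sketch (the table and key names are hypothetical):

from parsons import Redshift, Table

rs = Redshift()

# Records to update or insert, keyed on the 'id' column
updates = Table([{'id': 1, 'value': 'new'}, {'id': 2, 'value': 'newer'}])

rs.upsert(updates, 'tmc_scratch.test_table', primary_key='id')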
+
+create_schema_with_permissions(schema, group=None)Âś
+

Creates a Redshift schema (if it doesn’t already exist), and grants usage permissions to +a Redshift group (if specified).

+
+
Args:
+
+
schema: str
+
The schema name
+
group: str
+
The Redshift group name
+
+
+
+
+
+ +
+
+download(sql)Âś
+

Execute a SQL statement and returns the results, if any.

+
+
Args:
+
+
sql: str
+
A SQL statement
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+duplicate_table(source_table, destination_table, where_clause='', if_exists='fail', drop_source_table=False)Âś
+

Create a copy of an existing table (or subset of rows) in a new +table. It will inherit encoding, sortkey and distkey.

+
+
Args:
+
+
source_table: str
+
Name of existing schema and table (e.g. myschema.oldtable)
+
destination_table: str
+
Name of destination schema and table (e.g. myschema.newtable)
+
where_clause: str
+
An optional where clause (e.g. where org = 1).
+
if_exists: str
+
If the table already exists, either fail, append, drop, +or truncate the table.
+
drop_source_table: boolean
+
Drop the source table
+
+
+
+
+ +
+
+get_columns(schema, table_name)Âś
+

Gets the column names (and some other column info) for a table.

+

If you just need the column names, you can treat the return value like a list, e.g. by iterating over it or passing it to list().

+
+
Args:
+
+
schema: str
+
The schema name
+
table_name: str
+
The table name
+
+
+
Returns:
+

A dict mapping column name to a dict with extra info. The keys of the dict are ordered +just like the columns in the table. The extra info is a dict with format:
+
+
{'data_type': str, 'max_length': int or None, 'is_nullable': bool}
+

+
+
+
+ +
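For example, iterating over the return value like a list of column names (schema and table are hypothetical):

from parsons import Redshift

rs = Redshift()

cols = rs.get_columns('myschema', 'mytable')
for name, info in cols.items():
    print(name, info['data_type'], info['is_nullable'])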
+
+get_max_date(table_name, date_column)Âś
+

Return the max date from a table.

+
+
Args:
+
+
table_name: str
+
Schema and table name
+
date_column: str
+
The column containing the date
+
+
+
+
+ +
+
+get_queries()Âś
+

Return the current queries running and queueing, along with resource consumption.

+
+

Warning

+

Must be a Redshift superuser to run this method.

+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_table_stats(schema=None, table_name=None)Âś
+

List table statistics, including row count and size.

+
+

Warning

+

This method is only accessible by Redshift superusers.

+
+
+
Args:
+
+
schema: str
+
Filter by a schema
+
table_name: str
+
Filter by a table name
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_tables(schema=None, table_name=None)Âś
+

List the tables in a schema including metadata.

+
+
Args:
+
+
schema: str
+
Filter by a schema
+
table_name: str
+
Filter by a table name
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+get_views(schema=None, view=None)Âś
+

List views.

+
+
Args:
+
+
schema: str
+
Filter by a schema
+
view: str
+
Filter by a table name
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+grant_schema_permissions(schema, group, permissions_type='select')Âś
+

Grants a Redshift group permissions to all tables within an existing schema.

+
+
Args:
+
+
schema: str
+
The schema name
+
group: str
+
The Redshift group name
+
type: str
+
The type of permissions to grant. Supports select, all, etc. (For +full list, see the +Redshift GRANT docs)
+
+
+
+
+ +
+
+move_table(source_table, new_table, drop_source_table=False)Âś
+

Move an existing table in the database.

+

It will inherit encoding, sortkey and distkey. Once run, the source table +rows will be empty. This is more efficient than running +"create newtable as select * from oldtable".

+

For more information, see: ALTER TABLE APPEND

+
+
Args:
+
+
source_table: str
+
Name of existing schema and table (e.g. myschema.oldtable)
+
new_table: str
+
New name of schema and table (e.g. myschema.newtable)
+
drop_source_table: boolean
+
Drop the source table.
+
+
+
Returns:
+
None
+
+
+ +
+
+populate_table_from_query(query, destination_table, if_exists='fail', distkey=None, sortkey=None)Âś
+

Populate a Redshift table with the results of a SQL query, creating the table if it +doesn’t yet exist.

+
+
Args:
+
+
query: str
+
The SQL query
+
destination_table: str
+
Name of destination schema and table (e.g. myschema.newtable)
+
if_exists: str
+
If the table already exists, either fail, append, drop, +or truncate the table.
+
distkey: str
+
The column to use as the distkey for the table.
+
sortkey: str
+
The column to use as the sortkey for the table.
+
+
+
+
+ +
+
+rename_table(table_name, new_table_name)Âś
+

Rename an existing table.

+
+

Note

+

You cannot move schemas when renaming a table. Instead, utilize +the parsons.Redshift.duplicate_table() method.

+
+
+
Args:
+
+
table_name: str
+
Name of existing schema and table (e.g. myschema.oldtable)
+
new_table_name: str
+
New name for table. Note: Omit schema in table name.
+
+
+
+
+ +
+
+union_tables(new_table_name, tables, union_all=True, view=False)Âś
+

Union a series of tables into a new table.

+
+
Args:
+
+
new_table_name: str
+
The new table and schema (e.g. myschema.newtable)
+
tables: list
+
A list of tables to union
+
union_all: boolean
+
If False will dedupe rows
+
view: boolean
+
Create a view rather than a static table
+
+
+
Returns:
+
None
+
+
+ +
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/s3.html b/docs/html/s3.html new file mode 100644 index 0000000000..0fcfcc11ae --- /dev/null +++ b/docs/html/s3.html @@ -0,0 +1,487 @@ + + + + + + + + + + + S3 — Parsons 0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

S3Âś

+

The S3 class relies heavily on the boto3 python package. It includes a suite of methods commonly +used with S3.

+

In order to instantiate the class you must pass valid kwargs, or have an AWS config file stored locally, or store the following +environmental variables:

+
    +
  • 'AWS_ACCESS_KEY_ID'
  • +
  • 'AWS_SECRET_ACCESS_KEY'
  • +
+
+
+class parsons.S3(aws_access_key_id=None, aws_secret_access_key=None)[source]Âś
+

Instantiate by passing aws_access_key_id and aws_secret_access_key +or the environmental variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.

+
+
+s3 = NoneÂś
+

Boto3 API Session Resource object. Use for more advanced boto3 features.

+
+ +
+
+client = NoneÂś
+

Boto3 API Session client object. Use for more advanced boto3 features.

+
+ +
+
+list_buckets()[source]Âś
+

List all buckets to which you have access.

+
+
Returns:
+
list
+
+
+ +
+
+bucket_exists(bucket)[source]Âś
+

Determine if a bucket exists and you have access to it.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
+
+
Returns:
+
+
boolean
+
True if the bucket exists and False if not.
+
+
+
+
+ +
+
+list_keys(bucket, prefix=None, suffix=None, regex=None, date_modified_before=None, date_modified_after=None)[source]Âś
+

List the keys in a bucket, along with extra info about each one.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
prefix: str
+
Limits the response to keys that begin with the specified prefix.
+
suffix: str
+
Limits the response to keys that end with specified suffix
+
regex: str
+
Limits the response to keys that match a regex pattern
+
date_modified_before: datetime.datetime
+
Limits the response to keys with date modified before
+
date_modified_after: datetime.datetime
+
Limits the response to keys with date modified after
+
+
+
Returns:
+
+
dict
+
Dict mapping the keys to info about each key. The info includes ‘LastModified’, +‘Size’, and ‘Owner’.
+
+
+
+
+ +
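A sketch of filtering keys by prefix and suffix (the bucket and prefix are hypothetical; AWS keys assumed in env variables):

from parsons import S3

s3 = S3()

keys = s3.list_keys('my-bucket', prefix='exports/', suffix='.csv')
for key, info in keys.items():
    # info includes 'LastModified', 'Size', and 'Owner'
    print(key, info['Size'], info['LastModified'])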
+
+key_exists(bucket, key)[source]Âś
+

Determine if a key exists in a bucket.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
+
+
Returns:
+
+
boolean
+
True if key exists and False if not.
+
+
+
+
+ +
+
+create_bucket(bucket)[source]Âś
+

Create an s3 bucket. +WARNING - S3 has a limit on the number of buckets you can create in an AWS account, and +that limit is fairly low (typically 100). If you are creating buckets frequently, +you may be mis-using S3, and should consider using the same bucket for multiple tasks. +There is no limit on the number of objects in a bucket. +See AWS bucket restrictions for more info.

+

Also be aware that S3 bucket names are globally unique. So when creating a new bucket, +the name can’t collide with any existing bucket names. If the provided name does collide, +you’ll see errors like IllegalLocationConstraintException or BucketAlreadyExists.

+
+
Args:
+
+
bucket: str
+
The name of the bucket to create
+
+
+
Returns:
+
None
+
+
+ +
+
+put_file(bucket, key, local_path, acl='bucket-owner-full-control', **kwargs)[source]Âś
+

Uploads an object to an S3 bucket

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
local_path: str
+
The local path of the file to upload
+
acl: str
+
The S3 permissions on the file
+
kwargs:
+
Additional arguments for the S3 API call. See AWS Put Object documentation for more +info.
+
+
+
+
+ +
+
+remove_file(bucket, key)[source]Âś
+

Deletes an object from an S3 bucket

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
+
+
Returns:
+
None
+
+
+ +
+
+get_file(bucket, key, local_path=None)[source]Âś
+

Download an object from S3 to a local file

+
+
Args:
+
+
local_path: str
+
The local path where the file will be downloaded. If not specified, a temporary +file will be created and returned, and that file will be removed automatically +when the script is done running.
+
bucket: str
+
The bucket name
+
key: str
+
The object key
+
+
+
Returns:
+
+
str
+
The path of the new file
+
+
+
+
+ +
+
+get_url(bucket, key, expires_in=3600)[source]Âś
+

Generates a presigned url for an s3 object.

+
+
Args:
+
+
bucket: str
+
The bucket name
+
key: str
+
The object name
+
expires_in: int
+
The time, in seconds, until the url expires
+
+
+
Returns:
+
+
Url:
+
A link to download the object
+
+
+
+
+ +
+
+transfer_bucket(origin_bucket, origin_key, destination_bucket, destination_key=None, suffix=None, regex=None, date_modified_before=None, date_modified_after=None, public_read=True)[source]Âś
+

Transfer files between s3 buckets.

+
Args:

+
+
+
origin_bucket: str
+
The origin bucket
+
origin_key: str
+
The origin file or prefix
+
destination_bucket: str
+
The destination bucket
+
destination_key: str
+
If None then will retain the origin key. If set to prefix will move all +to new prefix
+
suffix: str
+
Limits the response to keys that end with specified suffix
+
regex: str
+
Limits the response to keys that match a regex pattern
+
date_modified_before: datetime.datetime
+
Limits the response to keys with date modified before
+
date_modified_after: datetime.datetime
+
Limits the response to keys with date modified after
+
public_read: bool
+
If the keys should be set to public-read
+
+
+
+
Returns:
+
None
+
+
+ +
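For instance, a sketch of moving one key between buckets (all bucket and key names are hypothetical):

from parsons import S3

s3 = S3()

s3.transfer_bucket(
    origin_bucket='my-source-bucket',
    origin_key='exports/file.csv',
    destination_bucket='my-archive-bucket',
    public_read=False)  # destination key defaults to the origin key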
+ +
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/search.html b/docs/html/search.html new file mode 100644 index 0000000000..80f4a1179d --- /dev/null +++ b/docs/html/search.html @@ -0,0 +1,262 @@ + + + + + + + + + + + Search — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ +
    + +
+ + +
+
+
+
+ + + + +
+ +
+ +
+ +
+
+ + +
+ +
+

+ © Copyright 2019, The Movement Cooperative + +

+
+ Built with Sphinx using a theme provided by Read the Docs. + +
+ +
+
+ +
+ +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/html/searchindex.js b/docs/html/searchindex.js
new file mode 100644
index 0000000000..f8bad26d5e
--- /dev/null
+++ b/docs/html/searchindex.js
@@ -0,0 +1 @@
+Search.setIndex({docnames:["action_kit","action_network","airtable","aws","azure","bill_com","bloomerang","box","braintree","build_a_connector","census_geocoder","civis","contributing","copper","crowdtangle","databases","dbsync","facebook_ads","freshdesk","github","google","hustle","index","mailchimp","mobilize_america","newmode","ngpvan","notifications","p2a","pdi","redash","rockthevote","salesforce","sftp","table","targetsmart","turbovote","twilio","utilities","zoom"], …
[… remainder of the single-line, Sphinx-generated search index (envversion, filenames, and the objects/objnames/objtypes/terms mappings for every documented Parsons class and method) elided: minified build output, not hand-written content …]
anslat:34,transpar:12,trash:32,treat:34,tri:34,tripl:9,troubl:12,truncat:[3,11,15,16,20,34,35],truncatecolumn:3,ts_api:35,ts_api_kei:35,ts_auto:35,ts_sftp_password:35,ts_sftp_usernam:35,tsmart:35,tstate:34,tupl:[3,34],turbovot:22,turbovote_password:36,turbovote_subdomain:36,turbovote_usernam:36,tweak:22,tweet:14,twilio:22,twilio_account_sid:37,twilio_auth_token:37,two:[3,5,6,9,10,14,16,26,30,34,35],txt:[9,12,22,27],txxxxxxx:27,type:[3,4,13,14,15,16,17,18,20,23,24,26,27,29,30,31,32,34,35,37,38,39],type_nam:26,typecast:2,typic:[3,7,9,20,27,34],udpat:29,uer:0,ul2vt:29,unabl:10,unauthent:19,under:[7,9,14,17,23,32,33,34],underli:[20,34],undesir:0,union:[3,4],union_al:3,union_t:3,uniqu:[3,10,12,20,23,26],unique_email_id:23,unit:[9,10,17],unittest:[9,22],univers:29,unix:[12,13,28],unknown:34,unless:[3,20,29,30,34],unlik:3,unlimit:22,unload:3,unmatch:26,unpack:[13,24,27,34],unpack_dict:34,unpack_list:34,unpack_nested_columns_as_row:34,unparsed_full_address:35,unrecogn:21,unrestrict:28,unsubscrib:[1,23],unsubscribed_sinc:23,until:[3,31,34],upars:35,upcom:39,updat:[0,1,2,3,6,12,21,24,28,29,32,34],update_:9,update_advoc:28,update_ag:21,update_campaign:0,update_cod:26,update_constitu:6,update_ev:[0,9,12],update_event_signup:0,update_flag_id:29,update_interact:6,update_lead:21,update_person:[1,9,26],update_person_json:26,update_record:[2,32],update_score_statu:26,update_signup:26,update_transact:6,update_us:0,updated_sinc:[18,24,28],upload:[0,3,4,7,11,15,20,22,26,27,34],upload_fil:[7,27],upload_saved_list:26,upload_scor:[26,38],upload_t:[4,7,20],uploaderror:0,upon:35,upsert:[3,32],upsert_person:26,upsert_person_json:26,upsert_record:32,upsert_result:32,uri:9,url:[0,2,3,4,5,9,20,26,27,30,34,35,38],url_kwarg:[26,38],url_post_typ:26,url_typ:[26,38],usag:[2,3,34],use:[0,2,3,4,5,6,7,8,9,10,11,12,14,17,18,19,20,22,23,24,25,26,27,28,31,32,33,34,35],used:[1,3,4,5,7,9,10,11,12,14,23,24,25,26,27,28,33,34,35],useful:[0,16,34,35],useful_resourc:12,user:[0,1,3,5,6,7,9,13,17,18,19,20,22,23,24,26,27,28,29,30,31,33,36,39],user_:0,user_api_kei:30,user_email:13,user_entered_valu:20,user_field:0,user_fields_onli:0,user_id:[0,6,27,39],user_nam:5,user_provided_onli:17,usernam:[0,3,5,15,19,25,29,32,33,34,35,36],users_t:17,uses:[4,9,12,19,20,22,26,33],using:[3,8,9,12,15,16,17,19,20,22,24,26,28,33,34,35],usp:35,usual:[3,29,32],utc:[4,14],utcnow:28,utf:3,util:[3,9,10,11,12,15,16,18,19,20,32,33],utilit:38,vacuum:3,valid:[0,3,4,7,10,14,15,17,18,20,21,25,26,27,34,35,36,39],valu:[0,1,2,3,9,14,15,16,17,20,21,22,23,26,27,28,30,32,34,35],value_column:3,van:[12,22,26,34],van_api_kei:26,van_connect:26,van_id:26,vanid:26,vanid_1:26,vanid_2:26,varchar:[3,34],varchar_max:3,varchar_width:3,vari:[14,26,28],variabl:[0,1,2,3,4,6,7,8,9,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,34,35,36,37,38,39],variat:23,varieti:[15,34],variou:[0,1,2,3,6,13,17,18,19,22,26,35],vb_tsmart_citi:17,vb_tsmart_dob:17,vb_tsmart_first_nam:17,vb_tsmart_last_nam:17,vb_tsmart_stat:17,vb_tsmart_zip:17,vb_vf_reg_cass_st:17,vb_vf_reg_citi:17,vb_vf_reg_stat:17,vb_vf_reg_zip:17,vb_vf_source_st:17,vb_voterbase_dob:17,vb_voterbase_phon:17,vb_voterbase_phone_wireless:17,venv:12,verb_noun:12,veri:34,verifi:[4,12,20,30],version:[9,22,23,26,27],via:[0,1,2,3,4,7,8,9,12,13,17,20,21,26,27,29,34,36,37,39],video:[14,39],view:[2,8,15,16,26],vimeo:14,vine:14,vintag:10,vip:23,vip_onli:23,virtual:24,visa:6,visibl:[17,24],vl2vt:29,vocabulari:12,voic:37,vol:29,volunt:[1,24,26],volunteer_activity_id:26,vote:[0,12,20,22,36],votebuild:[26,35],voter:[20,35,36],voter_reg:24,vot
er_registration_batch:26,voter_registration_check:35,voterbas:35,voterbase_email:17,voterregistrationbatch:26,vtd:29,wai:[3,8,12,20,27,34],wait:[0,11,31,34],want:[0,3,9,11,12,14,22,24,27,28,31,32,34],warehous:20,washington:[20,26],wast:22,watch:18,wayss:34,web:[9,15,22,39],webhook:[26,27],webhook_url:26,webhookurl:26,websit:[10,26],welcom:12,well:[3,9,10,21,32],were:[0,17,24,31],what:[9,17,27,34],whatev:[20,34],when:[0,1,3,4,5,7,8,9,12,15,16,17,20,22,27,28,30,33,34,38],where:[2,3,4,9,11,12,15,20,22,24,26,27,32,33,34],where_claus:3,whether:[0,3,5,14,15,17,20,24,26,28,31,34],which:[0,1,2,3,4,5,6,7,9,11,12,14,15,17,18,20,22,23,24,26,27,28,30,32,34,35,36,37],white:3,whitelist:3,whitespac:[17,34],who:[12,23,24],whole:3,whom:5,whose:[3,23,32],why:[],wide:3,wider:3,width:[3,34],wildcard:[3,35],win:3,window:[4,12],winning_formula:[3,15],winning_model:26,winning_scor:26,wipe:16,wish:[2,3,33],with_link:20,within:[3,4,9,20,26,34],without:[3,9,20,22,23,24,34],won:[9,17,22,34],word:[11,12,34],work:[0,3,9,12,16,20,22,26,34,38],workflow:12,worksheet:20,workspac:27,world:15,worri:[9,12,17],would:[3,12,30,34],wrap:9,wrapper:[3,9,17],write:[3,4,9,11,12,20,27,34],write_head:34,writer:[20,34],written:[3,9,12,22],www:26,xxport:26,year:[17,26],yearli:37,yellow:20,yesterdai:[28,37],yet:3,york:[0,6],you:[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39],your:[0,1,2,3,5,6,8,11,12,13,14,15,17,18,19,20,21,22,23,24,25,26,27,29,30,32,34,35,36,37,39],yourconnector:9,yourconnectornam:9,yourconnectorname_api_kei:9,youtub:14,yyyi:[14,26,29],yyyymmdd:[17,35],zappa:3,zero:[3,15],zip4:35,zip5:[26,28,35],zip:[0,17,26,28,33,34,35],zip_cod:[26,35],zipcod:[10,24],zoom:[12,22],zoom_api_kei:39,zoom_api_secret:39},titles:["ActionKit","Action Network","Airtable","Amazon Web Services","Azure: Blob Storage","Bill.com","Bloomerang","Box","Braintree","How to Build a Connector","US Census Geocoder","Civis","Contributing to Parsons","Copper","CrowdTangle","Databases","Database Sync","FacebookAds","Freshdesk","GitHub","Google","Hustle","About","Mailchimp","Mobilize America","New/Mode","NGPVAN","Notifications","Phone2Action","PDI","Redash","Rock the Vote","Salesforce","SFTP","Parsons 
Table","TargetSmart","TurboVote","Twilio","Utilities","Zoom"],titleterms:{"class":[9,29],"export":26,"final":9,"import":26,"new":25,Adding:[9,26],The:22,about:22,account:37,action:1,actionkit:0,activist:26,add:26,airtabl:2,amazon:3,america:24,api:[0,1,2,3,4,5,6,7,8,10,11,13,14,16,17,18,19,20,21,23,24,25,26,27,28,30,31,32,34,35,36,37,39],attribut:34,autom:[9,35],azur:4,basic:34,bigqueri:[15,20],bill:5,blob:4,bloomerang:6,box:7,braintre:8,build:9,bulk:26,canvass:26,censu:10,chang:[12,26],civi:11,civic:20,cloud:[20,38],code:[12,26],com:5,common:26,connector:9,contribut:12,convent:12,cooper:22,copper:13,core:3,creat:26,crowdtangl:14,custom:26,databas:[15,16],depend:12,design:22,document:[9,12],entiti:26,environ:12,event:26,exampl:34,facebookad:17,field:26,file:26,finish:9,folder:26,freshdesk:18,from:34,geocod:10,get:[9,37],github:19,gmail:27,goal:22,googl:[15,20],group:26,how:9,hustl:21,inbound:[],index:34,indic:22,initi:9,instal:[12,22],issu:12,job:26,lambda:3,lazi:34,licens:22,lint:12,list:26,load:[26,34],local:9,locat:26,log:22,mailchimp:23,make:12,messag:[],method:9,minim:22,mobil:24,mode:25,modifi:26,movement:22,mysql:15,network:1,ngpvan:26,notif:27,outbound:[],overview:[0,1,2,3,4,5,6,7,8,10,11,13,14,15,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,34,35,36,37,39],parson:[12,34],pattern:9,pdi:29,peopl:26,petl:34,phone2act:28,pipelin:34,postgr:15,pull:12,question:26,quick:[1,5,8,15,16,21,23,25,28,32,37,39],quickstart:[0,2,3,4,6,7,11,13,14,17,18,19,20,22,24,26,27,29,30,31,35,36],redash:30,redshift:[3,15],request:12,resourc:22,respons:26,rock:31,salesforc:32,sampl:12,save:26,schema:3,score:26,servic:3,sftp:33,sheet:20,signup:26,slack:27,sourc:22,start:[1,5,8,9,15,16,21,23,25,28,32,37,39],step:9,storag:[4,20,38],submit:12,support:26,survei:26,sync:16,tabl:[3,22,34],target:26,targetsmart:35,test:[9,12],transform:34,turbovot:36,twilio:37,unit:12,updat:26,usag:[22,37],util:[22,38],view:3,virtual:12,vote:31,web:3,workflow:26,your:9,zoom:39}}) \ No newline at end of file diff --git a/docs/html/sftp.html b/docs/html/sftp.html new file mode 100644 index 0000000000..0f77ac6c2c --- /dev/null +++ b/docs/html/sftp.html @@ -0,0 +1,463 @@ + + + + + + + + + + + SFTP — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

SFTP

+

The SFTP class allows you to interact with SFTP services.

+

It uses the Paramiko SFTP library under the hood.
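
For orientation, here is a minimal sketch of typical usage; the host, credentials, and remote paths below are hypothetical:

from parsons import SFTP

# Hypothetical server and credentials
sftp = SFTP(host='sftp.example.com', username='my_user', password='my_pass')

# List the contents of a remote directory
files = sftp.list_directory('outbound/')

# Download a remote CSV directly into a Parsons Table
tbl = sftp.get_table('outbound/people.csv')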

+
+
+class parsons.SFTP(host, username, password, port=22, rsa_private_key_file=None)
+

Instantiate SFTP Class

+
+
Args:
+
+
host: str
+
The host name
+
username: str
+
The user name
+
password: str
+
The password
+
rsa_private_key_file: str
+
Absolute path to a private RSA key used to authenticate the SFTP connection
+
port: int
+
Specify if different than the standard port 22
+
+
+
Returns:
+
SFTP Class
+
+
+
+create_connection()
+

Create an SFTP connection. You can then use this in a with block, and the connection will be closed when it goes out of scope. Use this when you wish to batch multiple methods over a single connection.

+
from parsons import SFTP
+
+# Hypothetical host and credentials; host, username, and password are required
+sftp = SFTP(host='myhost.com', username='my_user', password='my_pass')
+connection = sftp.create_connection()
+
+with connection as conn:
+    sftp.make_directory('my_dir', connection=conn)
+    sftp.put_file('my_csv.csv', 'my_dir/my_csv.csv', connection=conn)
+
+
+
+
Returns:
+
SFTP Connection object
+
+
+ +
+
+list_directory(remote_path='.', connection=None)
+

List the contents of a directory

+
+
Args:
+
+
remote_path: str
+
The remote path of the directory
+
connection: obj
+
An SFTP connection object
+
+
+
Returns:
+
list
+
+
+ +
+
+make_directory(remote_path, connection=None)
+

Makes a new directory on the SFTP server

+
+
Args:
+
+
remote_path: str
+
The remote path of the directory
+
connection: obj
+
An SFTP connection object
+
+
+
+
+ +
+
+remove_directory(remote_path, connection=None)
+

Remove a directory from the SFTP server

+
+
Args:
+
+
remote_path: str
+
The remote path of the directory
+
connection: obj
+
An SFTP connection object
+
+
+
+
+ +
+
+get_file(remote_path, local_path=None, connection=None)
+

Download a file from the SFTP server

+
+
Args:
+
+
remote_path: str
+
The remote path of the file to download
+
local_path: str
+
The local path where the file will be downloaded. If not specified, a temporary file will be created and returned, and that file will be removed automatically when the script is done running.
+
connection: obj
+
An SFTP connection object
+
+
+
Returns:
+
+
str
+
The path of the local file
+
+
+
+
+ +
+
+get_table(remote_path, connection=None)
+

Download a csv from the server and convert into a Parsons table.

+

The file may be compressed with gzip or zip, but may not contain multiple files in the archive.

+
+
Args:
+
+
remote_path: str
+
The remote path of the file to download
+
connection: obj
+
An SFTP connection object
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+put_file(local_path, remote_path, connection=None)
+

Put a file on the SFTP server.

Args:

+
+
+
local_path: str
+
The local path of the source file
+
remote_path: str
+
The remote path of the new file
+
connection: obj
+
An SFTP connection object
+
+
+
+ +
+
+remove_file(remote_path, connection=None)
+

Delete a file on the SFTP server

+
+
Args:
+
+
remote_path: str
+
The remote path of the file
+
connection: obj
+
An SFTP connection object
+
+
+
+
+ +
+
+get_file_size(remote_path, connection=None)
+

Get the size of a file in MB on the SFTP server. The file is not downloaded locally.

+
+
Args:
+
+
remote_path: str
+
The remote path of the file
+
connection: obj
+
An SFTP connection object
+
+
+
Returns:
+
+
int
+
The file size in MB.
+
+
+
+
+ +
diff --git a/docs/html/table.html b/docs/html/table.html
new file mode 100644
index 0000000000..623bd548f7
--- /dev/null
+++ b/docs/html/table.html
@@ -0,0 +1,1960 @@
+ Parsons Table — Parsons 0.5 documentation

Parsons Table

+
+

Overview

+

Most methods and functions in Parsons return a Table, which is a 2D list-like object similar to a Pandas Dataframe. You can call the following methods on the Table object to output it into a variety of formats or storage types. A full list of Table methods can be found in the API section.

+
+

From Parsons Table

Method | Destination Type | Description
to_csv() | CSV File | Write a table to a local csv file
to_s3_csv() | AWS s3 Bucket | Write a table to a csv stored in S3
to_sftp_csv() | SFTP Server | Write a table to a csv stored on an SFTP server
to_redshift() | Redshift Database | Write a table to a Redshift database
to_postgres() | Postgres Database | Write a table to a Postgres database
to_civis() | Civis Redshift Database | Write a table to a Civis platform database
to_petl() | Petl table object | Convert a table to a Petl table object
to_json() | JSON file | Write a table to a local JSON file
to_html() | HTML formatted table | Write a table to a local html file
to_dataframe() | Pandas Dataframe [1] | Return a Pandas dataframe

[1] Requires optional installation of the Pandas package by running pip install pandas.
+
+
+

To Parsons Table

+

Create a Parsons Table object using the following methods.

Method | Source Type | Description
from_csv() | File like object, local path, url, ftp | Loads a csv object into a Table
from_json() | File like object, local path, url, ftp | Loads a json object into a Table
from_columns() | List object | Loads lists organized as columns in Table
from_redshift() | Redshift table | Loads a Redshift query into a Table
from_postgres() | Postgres table | Loads a Postgres query into a Table
from_dataframe() | Pandas Dataframe [2] | Load a Parsons table from a Pandas Dataframe
from_s3_csv() | S3 CSV | Load a Parsons table from a csv file on S3

[2] Requires optional installation of the Pandas package by running pip install pandas.
+

You can also use the Table constructor to create a Table from a python list or petl table:

+
# From a list of dicts
+tbl = Table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
+
+# From a list of lists, the first list holding the field names
+tbl = Table([['a', 'b'], [1, 2], [3, 4]])
+
+# From a petl table
+tbl = Table(petl_tbl)
+
+
+
+
+

Parsons Table Attributes

+

Tables have a number of convenience attributes.

Attribute | Description
.num_rows | The number of rows in the table
.columns | A list of column names in the table
.data | The actual data (rows) of the table, as a list of tuples (without field names)
.first | The first value in the table. Use for database queries where a single value is returned.
+
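
For example, on a small table the attributes behave as sketched below (expected values shown in comments):

from parsons import Table

tbl = Table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])

tbl.num_rows   # 2
tbl.columns    # ['a', 'b']
tbl.data       # [(1, 2), (3, 4)]
tbl.first      # 1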
+
+

Parsons Table Transformations

+

Parsons tables have many methods that allow you to easily transform tables. Below is a selection of commonly used methods. The full list can be found in the API section.

+

Column Transformations

Method | Description
add_column() | Add a column
remove_column() | Remove a column
rename_column() | Rename a column
move_column() | Move a column within a table
cut() | Return a table with a subset of columns
fill_column() | Provide a fixed value to fill a column
fillna_column() | Provide a fixed value to fill all null values in a column
get_column_types() | Get the python type of values for a given column
convert_column() | Transform the values of a column via arbitrary functions
coalesce_columns() | Coalesce values from one or more source columns
map_columns() | Standardizes column names based on multiple possible values
+
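
As a quick sketch, a few of these column methods chained together (the column names and values are purely illustrative):

from parsons import Table

tbl = Table([{'first': 'Jane', 'last': 'doe'}])

tbl.add_column('source', 'web_form')                 # fill every row with a fixed value
tbl.rename_column('first', 'first_name')
tbl.convert_column('last', lambda val: val.title())  # 'doe' -> 'Doe'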

Row Transformations

Method | Description
select_rows() | Return a table of a subset of rows based on filters
stack() | Stack a number of tables on top of one another
chunk() | Divide tables into smaller tables based on row count
remove_null_rows() | Removes rows with null values in specified columns
+
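
For instance, chunk() is handy for splitting a large table before writing it out in pieces; the row count, table, and file names here are arbitrary:

for i, small_tbl in enumerate(big_tbl.chunk(10000)):
    small_tbl.to_csv(f'chunk_{i}.csv')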

Extraction and Reshaping

Method | Description
unpack_dict() | Unpack dictionary values from one column to top level columns
unpack_list() | Unpack list values from one column and add to top level columns
long_table() | Take a column with nested data and create a new long table
unpack_nested_columns_as_rows() | Unpack list or dict values from one column into separate rows
+
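
A sketch of unpack_dict() on a column of nested dictionaries (with the default prepend=True the new columns would be named address_city and address_state instead):

from parsons import Table

tbl = Table([{'id': 1, 'address': {'city': 'Washington', 'state': 'DC'}}])
tbl.unpack_dict('address', prepend=False)
# Columns are now: id, city, state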
+
+

Parsons Table Indexing

+

To access rows and columns of data within a Parsons table, you can index on them. To access a column, pass in the column name as a string (e.g. tbl['a']); to access a row, pass in the row index as an integer (e.g. tbl[1]).

+
tbl = Table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
+
+# Return a column as a list
+tbl['a']
+>> [1, 3]
+
+# Return a row as a dict
+tbl[1]
+>> {'a': 3, 'b': 4}
+
+
+

A note on indexing and iterating over a table’s data: if you need to iterate over the data, make sure to use the Python iterator syntax, so any data transformations can be applied efficiently. An example:

+
# Some data transformations
+table.add_column('newcol', 'some value')
+
+# Efficient way to grab all the data (applying the data transformations only once)
+rows_list = [row for row in table]
+
+
+
+

Warning

+

If you must index directly into a table’s data, you can do so, but note that data transformations will be applied each time you do so. So this code will be very inefficient on a large table…

+
+
# Inefficient way to grab all the data
+rows_list = []
+for i in range(0, table.num_rows):
+  # Data transformations will be applied each time through this loop!
+  rows_list.append(table[i])
+
+
+
+
+

PETL

+

The Parsons Table relies heavily on the petl Python package. You can always access the underlying petl table with my_parsons_table.table, which will allow you to perform any petl-supported ETL operations.

+
+
+

Lazy Loading

+

The Parsons Table makes use of “lazy” loading and “lazy” transformations. What this means is that it tries not to load and process your data until absolutely necessary.

+

An example:

+
# Specify where to load the data
+tbl = Table.from_csv('name_data.csv')
+
+# Specify data transformations
+tbl.add_column('full_name', lambda row: row['first_name'] + ' ' + row['last_name'])
+tbl.remove_column('first_name', 'last_name')
+
+# Save the table elsewhere
+# IMPORTANT - The CSV won't actually be loaded and transformed until this step,
+# since this is the first time it's actually needed.
+tbl.to_redshift('main.name_table')
+
+
+

This “lazy” loading can be very convenient and performant. However, it can make issues hard to debug. Eg. if your data transformations are time-consuming, you won’t actually notice that performance hit until you try to use the data, potentially much later in your code.

+

So just be aware of this behavior.

+
+
+
+

Examples

+
+

Basic Pipelines

+
# S3 to Civis
+s3 = S3()
+csv = s3.get_file('tmc-bucket', 'my_ids.csv')
+Table.from_csv(csv).to_civis('TMC','ids.my_ids')
+
+#VAN Activist Codes to a Dataframe
+van = VAN(db='MyVoters')
+van.activist_codes().to_dataframe()
+
+#VAN Events to an s3 bucket
+van = VAN(db='MyVoters')
+van.events().to_s3_csv('my-van-bucket','myevents.csv')
+
+
+
+
+
+

To & From API

+
+
+class parsons.etl.tofrom.ToFrom
+
+
+to_dataframe(index=None, exclude=None, columns=None, coerce_float=False)
+

Outputs table as a Pandas Dataframe

+
+
Args:
+
+
index: str, list
+
Field of array to use as the index, alternately a specific set +of input labels to use
+
exclude: list
+
Columns or fields to exclude
+
columns: list
+
Column names to use. If the passed data do not have names +associated with them, this argument provides names for the +columns. Otherwise this argument indicates the order of the +columns in the result (any names not found in the data will +become all-NA columns)
+
+
+
Returns:
+
+
dataframe
+
Pandas DataFrame object
+
+
+
+
+ +
+
+to_html(local_path=None, encoding=None, errors='strict', index_header=False, caption=None, tr_style=None, td_styles=None, truncate=None)
+

Outputs table to html.

+
+

Warning

+

If a file already exists at the given location, it will be +overwritten.

+
+
+
Args:
+
+
local_path: str
+
The path to write the html locally. If not specified, a temporary file will be +created and returned, and that file will be removed automatically when the script +is done running.
+
encoding: str
+
The encoding type for csv.writer()
+
errors: str
+
Raise an Error if encountered
+
index_header: boolean
+
Prepend index to column names; Defaults to False.
+
caption: str
+
A caption to include with the html table.
+
tr_style: str or callable
+
Style to be applied to the table row.
+
td_styles: str, dict or callable
+
Styles to be applied to the table cells.
+
truncate: int
+
Length of cell data.
+
+
+
Returns:
+
+
str
+
The path of the new file
+
+
+
+
+ +
+
+to_csv(local_path=None, temp_file_compression=None, encoding=None, errors='strict', write_header=True, csv_name=None, **csvargs)
+

Outputs table to a CSV. Additional key word arguments are passed to csv.writer(). So, +e.g., to override the delimiter from the default CSV dialect, provide the delimiter +keyword argument.

+
+

Warning

+

If a file already exists at the given location, it will be +overwritten.

+
+
+
Args:
+
+
local_path: str
+
The path to write the csv locally. If it ends in “.gz” or “.zip”, the file will be +compressed. If not specified, a temporary file will be created and returned, +and that file will be removed automatically when the script is done running.
+
temp_file_compression: str
+
If a temp file is requested (ie. no local_path is specified), the compression +type for that file. Currently “None”, “gzip” or “zip” are supported. +If a local_path is specified, this argument is ignored.
+
encoding: str
+
The CSV encoding type for csv.writer()
+
errors: str
+
Raise an Error if encountered
+
write_header: boolean
+
Include header in output
+
csv_name: str
+
If zip compression (either specified or inferred), the name of csv file +within the archive.
+
**csvargs: kwargs
+
csv_writer optional arguments
+
+
+
Returns:
+
+
str
+
The path of the new file
+
+
+
+
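
For example, a sketch of writing gzip-compressed, pipe-delimited output (the file name is arbitrary):

# The '.gz' suffix triggers compression; delimiter is passed through to csv.writer()
path = tbl.to_csv('people.csv.gz', delimiter='|')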
+ +
+
+append_csv(local_path, encoding=None, errors='strict', **csvargs)
+

Appends table to an existing CSV.

+

Additional keyword arguments are passed to csv.writer(). So, e.g., to override the delimiter from the default CSV dialect, provide the delimiter keyword argument.

+
+
Args:
+
+
local_path: str
+
The local path of an existing CSV file. If it ends in “.gz”, the file will +be compressed.
+
encoding: str
+
The CSV encoding type for csv.writer()
+
errors: str
+
Raise an Error if encountered
+
**csvargs: kwargs
+
csv_writer optional arguments
+
+
+
Returns:
+
+
str
+
The path of the file
+
+
+
+
+ +
+
+to_zip_csv(archive_path=None, csv_name=None, encoding=None, errors='strict', write_header=True, if_exists='replace', **csvargs)
+

Outputs table to a CSV in a zip archive. Additional keyword arguments are passed to csv.writer(). So, e.g., to override the delimiter from the default CSV dialect, provide the delimiter keyword argument. Use this method if you would like to write multiple csv files to the same archive.

+
+

Warning

+

If a file already exists in the archive, it will be overwritten.

+
+
+
Args:
+
+
archive_path: str
+
The path to the zip archive. If not specified, a temporary file will be created and returned, and that file will be removed automatically when the script is done running.
+
csv_name: str
+
The name of the csv file to be stored in the archive. If None will use +the archive name.
+
encoding: str
+
The CSV encoding type for csv.writer()
+
errors: str
+
Raise an Error if encountered
+
write_header: boolean
+
Include header in output
+
if_exists: str
+
If archive already exists, one of ‘replace’ or ‘append’
+
**csvargs: kwargs
+
csv_writer optional arguments
+
+
+
Returns:
+
+
str
+
The path of the archive
+
+
+
+
+ +
+
+to_json(local_path=None, temp_file_compression=None, line_delimited=False)
+

Outputs table to a JSON file

+
+

Warning

+

If a file already exists at the given location, it will be +overwritten.

+
+
+
Args:
+
+
local_path: str
+
The path to write the JSON locally. If it ends in “.gz”, it will be +compressed first. If not specified, a temporary file will be created and returned, +and that file will be removed automatically when the script is done running.
+
temp_file_compression: str
+
If a temp file is requested (ie. no local_path is specified), the compression +type for that file. Currently “None” and “gzip” are supported. +If a local_path is specified, this argument is ignored.
+
line_delimited: bool
+
Whether the file will be line-delimited JSON (with a row on each line), or a proper +JSON file.
+
+
+
Returns:
+
+
str
+
The path of the new file
+
+
+
+
+ +
+
+to_dicts()
+

Output table as a list of dicts.

+
+
Returns:
+
list
+
+
+ +
+
+to_sftp_csv(remote_path, host, username, password, port=22, encoding=None, compression=None, errors='strict', write_header=True, rsa_private_key_file=None, **csvargs)
+

Writes the table to a CSV file on a remote SFTP server

+
+
Args:
+
+
remote_path: str
+
The remote path of the file. If it ends in ‘.gz’, the file will be compressed.
+
host: str
+
The remote host
+
username: str
+
The username to access the SFTP server
+
password: str
+
The password to access the SFTP server
+
port: int
+
The port number of the SFTP server
+
encoding: str
+
The CSV encoding type for csv.writer()
+
errors: str
+
Raise an Error if encountered
+
write_header: boolean
+
Include header in output
+
rsa_private_key_file: str
+
Absolute path to a private RSA key used to authenticate the SFTP connection
+
**csvargs: kwargs
+
csv_writer optional arguments
+
+
+
+
+ +
+
+to_s3_csv(bucket, key, aws_access_key_id=None, aws_secret_access_key=None, compression=None, encoding=None, errors='strict', write_header=True, acl='bucket-owner-full-control', public_url=False, public_url_expires=3600, **csvargs)
+

Writes the table to an s3 object as a CSV

+
+
Args:
+
+
bucket: str
+
The s3 bucket to upload to
+
key: str
+
The s3 key to name the file. If it ends in ‘.gz’ or ‘.zip’, the file will be +compressed.
+
aws_access_key_id: str
+
Required if not included as environmental variable
+
aws_secret_access_key: str
+
Required if not included as environmental variable
+
compression: str
+
The compression type for the s3 object. Currently “None”, “zip” and “gzip” are +supported. If specified, will override the key suffix.
+
encoding: str
+
The CSV encoding type for csv.writer()
+
errors: str
+
Raise an Error if encountered
+
write_header: boolean
+
Include header in output
+
public_url: boolean
+
Create a public link to the file
+
public_url_expires: int
+
The time, in seconds, until the URL expires if public_url is set to True. Defaults to 3600.
+
acl: str
+
The S3 permissions on the file
+
**csvargs: kwargs
+
csv_writer optional arguments
+
+
+
Returns:
+
The public URL if public_url is set to True; otherwise None.
+
+
+ +
+
+to_redshift(table_name, username=None, password=None, host=None, db=None, port=None, **copy_args)
+

Write a table to a Redshift database. Note, this requires you to pass +AWS S3 credentials or store them as environmental variables.

+
+
Args:
+
+
table_name: str
+
The table name and schema (my_schema.my_table) to point the file.
+
username: str
+
Required if env variable REDSHIFT_USERNAME not populated
+
password: str
+
Required if env variable REDSHIFT_PASSWORD not populated
+
host: str
+
Required if env variable REDSHIFT_HOST not populated
+
db: str
+
Required if env variable REDSHIFT_DB not populated
+
port: int
+
Required if env variable REDSHIFT_PORT not populated. Port 5439 is typical.
+
**copy_args: kwargs
+
See copy() for options.
+
+
+
Returns:
+
None
+
+
+ +
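
A minimal sketch of a round trip, assuming the REDSHIFT_* environment variables above are set and S3 credentials are available for the copy (the schema and table names are hypothetical):

from parsons import Table

tbl = Table.from_redshift('SELECT * FROM my_schema.source_table')
tbl.to_redshift('my_schema.dest_table')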
+
+to_postgres(table_name, username=None, password=None, host=None, db=None, port=None, **copy_args)
+

Write a table to a Postgres database.

+
+
Args:
+
+
table_name: str
+
The table name and schema (my_schema.my_table) to point the file.
+
username: str
+
Required if env variable PGUSER not populated
+
password: str
+
Required if env variable PGPASSWORD not populated
+
host: str
+
Required if env variable PGHOST not populated
+
db: str
+
Required if env variable PGDATABASE not populated
+
port: int
+
Required if env variable PGPORT not populated.
+
**copy_args: kwargs
+
See copy() for options.
+
+
+
Returns:
+
None
+
+
+ +
+
+to_civis(table, api_key=None, db=None, max_errors=None, existing_table_rows='fail', diststyle=None, distkey=None, sortkey1=None, sortkey2=None, wait=True, **civisargs)
+

Write the table to a Civis Redshift cluster. Additional keyword arguments can be passed to civis.io.dataframe_to_civis().

+
+
Args:
+
+
table: str
+
The schema and table you want to upload to. E.g., +‘scratch.table’. Schemas or tablenames with periods must be +double quoted, e.g. ‘scratch.”my.table”’.
+
api_key: str
+
Your Civis API key. If not given, the CIVIS_API_KEY environment +variable will be used.
+
db: str or int
+
The Civis Database. Can be database name or ID
+
max_errors: int
+
The maximum number of rows with errors to remove from +the import before failing.
+
diststyle: str
+
The distribution style for the table. One of ‘even’, ‘all’ +or ‘key’.
+
existing_table_rows: str
+
The behaviour if a table with the requested name already +exists. One of ‘fail’, ‘truncate’, ‘append’ or ‘drop’. +Defaults to ‘fail’.
+
distkey: str
+
The column to use as the distkey for the table.
+
sortkey1: str
+
The column to use as the sortkey for the table.
+
sortkey2: str
+
The second column in a compound sortkey for the table.
+
wait: boolean
+
Wait for write job to complete before exiting method.
+
+
+
+
+ +
+
+classmethod from_csv(local_path, **csvargs)
+

Create a parsons table object from a CSV file

+
+
Args:
+
+
local_path: obj
+
A csv formatted local path, url or ftp. If this is a +file path that ends in “.gz”, the file will be decompressed first.
+
**csvargs: kwargs
+
csv_reader optional arguments
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+classmethod from_csv_string(str, **csvargs)
+

Create a parsons table object from a string representing a CSV.

+
+
Args:
+
+
str: str
+
The string object to convert to a table
+
**csvargs: kwargs
+
csv_reader optional arguments
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+classmethod from_columns(cols, header=None)
+

Create a parsons table from a list of lists organized as columns

+
+
Args:
+
+
cols: list
+
A list of lists organized as columns
+
header: list
+
List of column names. If not specified, will use dummy column names
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+classmethod from_json(local_path, header=None, line_delimited=False)
+

Create a parsons table from a json file

+
+
Args:
+
+
local_path: list
+
A JSON formatted local path, url or ftp. If this is a +file path that ends in “.gz”, the file will be decompressed first.
+
header: list
+
List of columns to use for the destination table. If omitted, columns will +be inferred from the initial data in the file.
+
line_delimited: bool
+
Whether the file is line-delimited JSON (with a row on each line), or a proper +JSON file.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+classmethod from_redshift(sql, username=None, password=None, host=None, db=None, port=None)
+

Create a parsons table from a Redshift query.

+

To pull an entire Redshift table, use a query like SELECT * FROM tablename.

+
+
Args:
+
+
sql: str
+
A valid SQL statement
+
username: str
+
Required if env variable REDSHIFT_USERNAME not populated
+
password: str
+
Required if env variable REDSHIFT_PASSWORD not populated
+
host: str
+
Required if env variable REDSHIFT_HOST not populated
+
db: str
+
Required if env variable REDSHIFT_DB not populated
+
port: int
+
Required if env variable REDSHIFT_PORT not populated. Port 5439 is typical.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+classmethod from_postgres(sql, username=None, password=None, host=None, db=None, port=None)

Create a parsons table from a Postgres query.
+
+
Args:
+
+
sql: str
+
A valid SQL statement
+
username: str
+
Required if env variable PGUSER not populated
+
password: str
+
Required if env variable PGPASSWORD not populated
+
host: str
+
Required if env variable PGHOST not populated
+
db: str
+
Required if env variable PGDATABASE not populated
+
port: int
+
Required if env variable PGPORT not populated.
+
+
+
+
+ +
+
+classmethod from_s3_csv(bucket, key, from_manifest=False, aws_access_key_id=None, aws_secret_access_key=None, **csvargs)
+

Create a parsons table from a key in an S3 bucket.

+
+
Args:
+
+
bucket: str
+
The S3 bucket.
+
key: str
+
The S3 key
+
from_manifest: bool
+
If True, treats key as a manifest file and loads all urls into a parsons.Table. +Defaults to False.
+
aws_access_key_id: str
+
Required if not included as environmental variable.
+
aws_secret_access_key: str
+
Required if not included as environmental variable.
+
**csvargs: kwargs
+
csv_reader optional arguments
+
+
+
Returns:
+
parsons.Table object
+
+
+ +
+
+classmethod from_dataframe(dataframe, include_index=False)
+

Create a parsons table from a Pandas dataframe.

+
+
Args:
+
+
dataframe: dataframe
+
A valid Pandas dataframe object
+
include_index: boolean
+
Include index column
+
+
+
+
+ +
+ +
+
+

Transformation API

+

The following methods allow you to manipulate the Parsons table data.

+
+
+class parsons.etl.etl.ETL
+
+
+add_column(column, value=None, index=None)
+

Add a column to your table

+
+
Args:
+
+
column: str
+
Name of column to add
+
value:
+
A fixed or calculated value
+
index: int
+
The position of the new column in the table
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+remove_column(*columns)
+

Remove a column from your table

+
+
Args:
+
+
*columns: str
+
Column names
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+rename_column(column_name, new_column_name)
+

Rename a column

+
+
Args:
+
+
column_name: str
+
The current column name
+
new_column_name: str
+
The new column name
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+fill_column(column_name, fill_value)
+

Fill a column in a table

+
+
Args:
+
+
column_name: str
+
The column to fill
+
fill_value:
+
A fixed or calculated value
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+fillna_column(column_name, fill_value)
+

Fill None values in a column in a table

+
+
Args:
+
+
column_name: str
+
The column to fill
+
fill_value:
+
Fixed value only
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+move_column(column, index)
+

Move a column

+
+
Args:
+
+
column: str
+
The column name to move
+
index:
+
The new index for the column
+
+
+
Returns:
+
Parsons Table and also updates existing object.
+
+
+ +
+
+convert_column(*column, **kwargs)
+

Transform values under one or more fields via arbitrary functions, method invocations or dictionary translations. This leverages the petl convert() method; example usage can be found in the petl documentation.

+
+
Args:
+
+
*column: str
+
A single column or multiple columns passed as a list
+
**kwargs: str, method or variable
+
The update function, method, or variable to process the update
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+get_column_max_width(column)
+

Return the maximum width of the column.

+
+
Args:
+
+
column: str
+
The column name.
+
+
+
Returns:
+
int
+
+
+ +
+
+convert_columns_to_str()
+

Convenience function to convert all non-string or mixed columns in a +Parsons table to string (e.g. for comparison)

+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+coalesce_columns(dest_column, source_columns, remove_source_columns=True)
+

Coalesces values from one or more source columns into a destination column, by selecting +the first non-empty value. If the destination column doesn’t exist, it will be added.

+
+
Args:
+
+
dest_column: str
+
Name of destination column
+
source_columns: list
+
List of source column names
+
remove_source_columns: bool
+
Whether to remove the source columns after the coalesce. If the destination +column is also one of the source columns, it will not be removed.
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+map_columns(column_map)
+

Standardizes column names based on multiple possible values. This method +is helpful when your input table might have multiple and unknown column +names.

+
+
Args:
+
+
column_map: dict
+
A dictionary of columns and possible values that map to it
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
tbl = Table([{'fn': 'Jane', 'lastname': 'Doe', 'dob': '1980-01-01'}])
+column_map = {'first_name': ['fn', 'first', 'firstname'],
+              'last_name': ['ln', 'last', 'lastname'],
+              'date_of_birth': ['dob', 'birthday']}
+tbl.map_columns(column_map)
+print(tbl)
+>> {'first_name': 'Jane', 'last_name': 'Doe', 'date_of_birth': '1980-01-01'}
+
+
+
+ +
+
+map_and_coalesce_columns(column_map)
+

Coalesces columns based on multiple possible values. The columns in the map do not need to be in your table, so you can create a map with all possibilities. The coalesce will occur in the order that the columns are listed, unless the destination column name already exists in the table, in which case that value will be preferenced. This method is helpful when your input table might have multiple and unknown column names.

Args:

+
+
+
column_map: dict
+
A dictionary of columns and possible values that map to it
+
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
tbl = Table([{'first': None, 'fn': 'Jane', 'lastname': 'Doe', 'dob': '1980-01-01'}])
+
+column_map = {'first_name': ['fn', 'first', 'firstname'],
+              'last_name': ['ln', 'last', 'lastname'],
+              'date_of_birth': ['dob', 'birthday']}
+
+tbl.map_and_coalesce_columns(column_map)
+
+print(tbl)
+>> {'first_name': 'Jane', 'last_name': 'Doe', 'date_of_birth': '1980-01-01'}
+
+
+
+ +
+
+get_column_types(column)
+

Return all of the Python types for values in a given column

+
+
Args:
+
+
column: str
+
Name of the column to analyze
+
+
+
Returns:
+
+
list
+
A list of Python types
+
+
+
+
+ +
+
+get_columns_type_stats()
+

Return descriptive stats for all columns

+
+
Returns:
+
+
list
+
A list of dicts, each containing a column ‘name’ and a ‘type’ list
+
+
+
+
+ +
+
+convert_table(*args)
+

Transform all cells in a table via arbitrary functions, method invocations or dictionary translations. This method is useful for cleaning fields and data hygiene functions such as regex. This method leverages the petl convert() method; example usage can be found in the petl documentation: https://petl.readthedocs.io/en/v0.24/transform.html#petl.convert

+
+
Args:
+
+
*args: str, method or variable
+
The update function, method, or variable to process the update.
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+unpack_dict(column, keys=None, include_original=False, sample_size=1000, missing=None, prepend=True, prepend_value=None)
+

Unpack dictionary values from one column into separate columns

+
+
Args:
+
+
column: str
+
The column name to unpack
+
keys: list
+
The dict keys in the column to unpack. If None, will unpack all.
+
include_original: boolean
+
Retain original column after unpacking
+
sample_size: int
+
Number of rows to sample before determining columns
+
missing: str
+
If a value is missing, the value to fill it with
+
prepend:
+
Prepend the column name of the unpacked values. Useful for +avoiding duplicate column names
+
prepend_value:
+
Value to prepend new columns if prepend=True. If None, will +set to column name.
+
+
+
+
+ +
+
+unpack_list(column, include_original=False, missing=None, replace=False, max_columns=None)
+

Unpack list values from one column into separate columns. Numbers the +columns.

+
# Begin with a list in column
+json = [{'id': '5421',
+         'name': 'Jane Green',
+         'phones': ['512-699-3334', '512-222-5478']
+        }
+       ]
+
+tbl = Table(json)
+print (tbl)
+>>> {'id': '5421', 'name': 'Jane Green', 'phones': ['512-699-3334', '512-222-5478']}
+
+tbl.unpack_list('phones', replace=True)
+print (tbl)
+>>> {'id': '5421', 'name': 'Jane Green', 'phones_0': '512-699-3334', 'phones_1': '512-222-5478'}
+
+
+
+
Args:
+
+
column: str
+
The column name to unpack
+
include_original: boolean
+
Retain original column after unpacking
+
sample_size: int
+
Number of rows to sample before determining columns
+
missing: str
+
If a value is missing, the value to fill it with
+
replace: boolean
+
Return new table or replace existing
+
max_columns: int
+
The maximum number of columns to unpack
+
+
+
Returns:
+
None
+
+
+ +
+
+unpack_nested_columns_as_rows(column, key='id', expand_original=False)
+

Unpack list or dict values from one column into separate rows. +Not recommended for JSON columns (i.e. lists of dicts), but can handle columns +with any mix of types. Makes use of PETL’s melt() method.

+
+
Args:
+
+
column: str
+
The column name to unpack
+
key: str
+
The column to use as a key when unpacking. Defaults to id
+
expand_original: boolean or int
+
If True: Add resulting unpacked rows (with all other columns) to original +If int: Add to original unless the max added per key is above the given number +If False (default): Return unpacked rows (with key column only) as standalone +Removes packed list and dict rows from original either way.
+
+
+
Returns:
+
If expand_original, original table with packed rows replaced by unpacked rows +Otherwise, standalone table with key column and unpacked values only
+
+
+ +
+
+long_table(key, column, key_rename=None, retain_original=False, prepend=True, prepend_value=None)
+

Create a new long parsons table from a column, including the foreign +key.

+
# Begin with nested dicts in a column
+json = [{'id': '5421',
+         'name': 'Jane Green',
+         'emails': [{'home': 'jane@gmail.com'},
+                    {'work': 'jane@mywork.com'}
+                   ]
+        }
+       ]
+tbl = Table(json)
+print (tbl)
+>>> {'id': '5421', 'name': 'Jane Green', 'emails': [{'home': 'jane@gmail.com'}, {'work': 'jane@mywork.com'}]}
+
+# Create skinny table of just the nested dicts
+email_skinny = tbl.long_table(['id'], 'emails')
+
+print (email_skinny)
+>>> {'id': '5421', 'emails_home': 'jane@gmail.com', 'emails_work': None}
+>>> {'id': '5421', 'emails_home': None, 'emails_work': 'jane@mywork.com'}
+
+
+
+
Args:
+
+
key: list
+
The columns to retain in the long table (e.g. foreign keys)
+
column: str
+
The column name to make long
+
key_rename: dict
+
The new name for the foreign key to better identify it. For +example, you might want to rename id to person_id. +Ex. {‘KEY_NAME’: ‘NEW_KEY_NAME’}
+
retain_original: boolean
+
Retain the original column from the source table.
+
prepend:
+
Prepend the column name of the unpacked values. Useful for +avoiding duplicate column names
+
prepend_value:
+
Value to prepend new columns if prepend=True. If None, will +set to column name.
+
+
+
Returns:
+
+
Parsons Table
+
The new long table
+
+
+
+
+ +
+
+cut(*columns)
+

Return a table with a selection of columns

+
+
Args:
+
+
*columns: str
+
Columns in the parsons table
+
+
+
Returns:
+
A new parsons table containing the selected columns
+
+
+ +
+
+select_rows(*filters)
+

Select specific rows from a Parsons table based on the passed +filters.

+

Example filters:

+
tbl = Table([['foo', 'bar', 'baz'],
+             ['c', 4, 9.3],
+             ['a', 2, 88.2],
+             ['b', 1, 23.3],])
+
+# You can structure the filter in multiple ways
+
+# Lambda Function
+tbl2 = tbl.select_rows(lambda row: row.foo == 'a' and row.baz > 88.1)
+tbl2
+>>> {'foo': 'a', 'bar': 2, 'baz': 88.2}
+
+# Expression String
+tbl3 = tbl.select_rows("{foo} == 'a' and {baz} > 88.1")
+tbl3
+>>> {'foo': 'a', 'bar': 2, 'baz': 88.2}
+
+
+
+
Args:
+
*filters: function or str
+
Returns:
+
A new parsons table containing the selected rows
+
+
+ +
+
+remove_null_rows(columns, null_value=None)
+

Remove rows if the values in a column are None. If multiple columns +are passed as list, it will remove all rows with null values in any +of the passed columns.

+
+
Args:
+
+
columns: str or list
+
The column or columns to analyze
+
null_value: int or float or str
+
The null value
+
+
+
Returns:
+
None
+
+
+ +
+
+stack(*tables, missing=None)
+

Stack Parsons tables on top of one another.

+

Similar to table.concat(), except no attempt is made to align fields from +different tables.

+
+
Args:
+
+
tables: Parsons Table or list
+
A single table, or a list of tables
+
missing: bool
+
The value to use when padding missing values
+
+
+
Returns:
+
None
+
+
+ +
+
+concat(*tables, missing=None)
+

Concatenates one or more tables onto this one.

+

Note that the tables do not need to share exactly the same fields. +Any missing fields will be padded with None, or whatever is provided via the +missing keyword argument.

+
+
Args:
+
+
tables: Parsons Table or list
+
A single table, or a list of tables
+
missing: bool
+
The value to use when padding missing values
+
+
+
Returns:
+
None
+
+
+ +
+
+chunk(rows)
+

Divides a Parsons table into smaller tables of a specified row count. If the table +cannot be divided evenly, then the final table will only include the remainder.

+
+
Args:
+
+
rows: int
+
The number of rows of each new Parsons table
+
+
+
Returns:
+
List of Parsons tables
+
+
+ +
+
+static get_normalized_column_name(column_name)
+

Returns a column name with whitespace removed, non-alphanumeric characters removed, and +everything lowercased.

+
+
Returns:
+
+
str
+
Normalized column name
+
+
+
+
+ +
+
+match_columns(desired_columns, fuzzy_match=True, if_extra_columns='remove', if_missing_columns='add')
+

Changes the column names and ordering in this Table to match a list of desired column +names.

+
+
Args:
+
+
desired_columns: list
+
Ordered list of desired column names
+
fuzzy_match: bool
+
Whether to normalize column names when matching against the desired column names, +removing whitespace and non-alphanumeric characters, and lowercasing everything. +Eg. With this flag set, “FIRST NAME” would match “first_name”. +If the Table has two columns that normalize to the same string (eg. “FIRST NAME” +and “first_name”), the latter will be considered an extra column.
+
if_extra_columns: string
+
If the Table has columns that don’t match any desired columns, either ‘remove’ +them, ‘ignore’ them, or ‘fail’ (raising an error).
+
if_missing_columns: string
+
If the Table is missing some of the desired columns, either ‘add’ them (with a +value of None), ‘ignore’ them, or ‘fail’ (raising an error).
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
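
A sketch of how the fuzzy matching plays out (the column names are illustrative):

from parsons import Table

tbl = Table([{'FIRST NAME': 'Jane', 'Email Address': 'jane@example.com'}])
tbl.match_columns(['first_name', 'email'])
# 'FIRST NAME' normalizes to 'firstname' and matches 'first_name';
# 'Email Address' matches no desired column and is removed;
# 'email' is missing and is added with a value of None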
+ +
+
+reduce_rows(columns, reduce_func, headers, presorted=False, **kwargs)
+

Group rows by a column or columns, then reduce the groups to a single row.

+

Based on the rowreduce petl function.

+

For example, the output from the query to get a table’s definition is returned as one component per row. The reduce_rows method can be used to reduce all those to a single row containing the entire query.

+
>>> ddl = rs.query(sql_to_get_table_ddl)
+>>> ddl.table
+
++--------------+--------------+----------------------------------------------------+
+| schemaname   | tablename    | ddl                                                |
++==============+==============+====================================================+
+| 'db_scratch' | 'state_fips' | '--DROP TABLE db_scratch.state_fips;'              |
++--------------+--------------+----------------------------------------------------+
+| 'db_scratch' | 'state_fips' | 'CREATE TABLE IF NOT EXISTS db_scratch.state_fips' |
++--------------+--------------+----------------------------------------------------+
+| 'db_scratch' | 'state_fips' | '('                                                |
++--------------+--------------+----------------------------------------------------+
+| 'db_scratch' | 'state_fips' | '\tstate VARCHAR(1024)   ENCODE RAW'               |
++--------------+--------------+----------------------------------------------------+
+| 'db_scratch' | 'state_fips' | '\t,stusab VARCHAR(1024)   ENCODE RAW'             |
++--------------+--------------+----------------------------------------------------+
+
+>>> reducer_fn = lambda columns, rows: [
+...     f"{columns[0]}.{columns[1]}",
+...     '\n'.join([row[2] for row in rows])]
+>>> ddl.reduce_rows(
+...     ['schemaname', 'tablename'],
+...     reducer_fn,
+...     ['tablename', 'ddl'],
+...     presorted=True)
+>>> ddl.table
+
++-------------------------+-----------------------------------------------------------------------+
+| tablename               | ddl                                                                   |
++=========================+=======================================================================+
+| 'db_scratch.state_fips' | '--DROP TABLE db_scratch.state_fips;\nCREATE TABLE IF NOT EXISTS      |
+|                         | db_scratch.state_fips\n(\n\tstate VARCHAR(1024)   ENCODE RAW\n\t      |
+|                         | ,db_scratch.state_fips\n(\n\tstate VARCHAR(1024)   ENCODE RAW         |
+|                         | \n\t,stusab VARCHAR(1024)   ENCODE RAW\n\t,state_name                 |
+|                         | VARCHAR(1024)   ENCODE RAW\n\t,statens VARCHAR(1024)   ENCODE         |
+|                         | RAW\n)\nDISTSTYLE EVEN\n;'                                            |
++-------------------------+-----------------------------------------------------------------------+
+
+
+
+
Args:
+
+
columns: list
+
The column(s) by which to group the rows.
+
reduce_func: fun
+
The function by which to reduce the rows. Should take two arguments, the columns list and the rows list, and return a list: reducer(columns: list, rows: list) -> list
+
headers: list
+
The list of headers for modified table. The length of headers +should match the length of the list returned by the reduce +function.
+
presorted: bool
+
If False, the rows will be sorted.
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+sort(columns=None, reverse=False)
+

Sort the rows of a table.

+
+
Args:
+
+
columns: list or str
+
Sort by a single column or a list of columns. If None, the table will be sorted by all columns from left to right.
+
reverse: boolean
+
Sort rows in reverse order.
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+
+set_header(new_header)
+

Replace the header row of the table.

+
+
Args:
+
+
new_header: list
+
List of new header column names
+
+
+
Returns:
+
Parsons Table and also updates self
+
+
+ +
+ +
diff --git a/docs/html/ts.html b/docs/html/ts.html
new file mode 100644
index 0000000000..ca836809a3
--- /dev/null
+++ b/docs/html/ts.html
@@ -0,0 +1,556 @@
+ TargetSmart — Parsons 0.5 documentation

TargetSmart

+

TargetSmart provides access to voter and consumer data for the progressive community. They provide extensive services for single-record lookup through their API. For larger bulk matching services they have an automation service, which requires that data files be posted to their SFTP server. Each service requires separate credentials, which is why there are separate classes for each.

+

Full documentation for both services can be found at the TargetSmart developer portal.

+
+

Warning

+
+
Returned fields
+
The fields that are returned are controlled by the TargetSmart staff. Please contact them if you need any adjustments or alterations made to the returned fields.
+
+
+
+

API 2.0

+
+

Warning

+
+
Endpoint Access
+
Access to endpoints is individually provisioned. If you encounter errors accessing an endpoint, please contact your TargetSmart account representative to verify that your API key has been provisioned access.
+
+
+
+
+class parsons.TargetSmartAPI(api_key=None)
+
+
+data_enhance(search_id, search_id_type='voterbase', state=None)
+

Searches for a record based on an ID, phone number, or email address

+
+
Args:
+
+
search_id: str
+
The primary key or email address or phone number
+
search_id_type: str
+
One of voterbase, exacttrack, abilitec_consumer_link, phone, +email, smartvan, votebuilder, voter, household.
+
state: str
+
Two character state code. Required if search_id_type of smartvan, +votebuilder or voter.
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
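+
+A minimal usage sketch (the email address is illustrative; the key can also be stored
+in an environment variable instead of passed as an argument):
+
+from parsons import TargetSmartAPI
+
+ts = TargetSmartAPI(api_key='MY_KEY')
+
+# Look up a single record by email address
+tbl = ts.data_enhance('person@example.com', search_id_type='email')
+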
+
+district(search_type='zip', address=None, zip5=None, zip4=None, state=None, latitude=None, longitude=None)Âś
+

Return district information based on a geographic point. The method allows you to +search based on the following:

++-------------+------------------+---------------------+
+| Search Type | Search Type Name | Required kwarg(s)   |
++=============+==================+=====================+
+| Zip Code    | zip              | zip5, zip4          |
++-------------+------------------+---------------------+
+| Address     | address          | address             |
++-------------+------------------+---------------------+
+| Point       | point            | latitude, longitude |
++-------------+------------------+---------------------+
+
+
Args:
+
+
search_type: str
+
The type of district search to perform. One of zip, address +or point.
+
address: str
+
An unparsed full address
+
zip5: str
+
The USPS Zip5 code
+
zip4: str
+
The USPS Zip4 code
+
state: str
+
The two character state code
+
latitude: float or str
+
Valid latitude floating point
+
longitude: float or str
+
Valid longitude floating point
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
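+
+A minimal usage sketch (assuming ts is an instantiated TargetSmartAPI object; the
+values are illustrative):
+
+# Look up districts by zip code
+districts = ts.district(search_type='zip', zip5='60622', zip4='7194')
+
+# Or look up districts by geographic point
+districts = ts.district(search_type='point', latitude=41.898, longitude=-87.661)
+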
+
+phone(table)Âś
+

Match based on a list of phone numbers. The table +can contain up to 500 phone numbers to match

+
+
Args:
+
+
table: parsons table
+
See Parsons Table. One row per phone number, +up to 500 phone numbers.
+
+
+
Returns:
+
See Parsons Table for output options.
+
+
+ +
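+
+A minimal usage sketch (assuming ts is an instantiated TargetSmartAPI object and that
+the numbers live in a single phone column; the values are illustrative):
+
+from parsons import Table
+
+phones = Table([{'phone': '4435705355'}, {'phone': '5126993336'}])
+matched = ts.phone(phones)
+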
+ +

Search for a person based on a specified radius

+
+
Args:
+
+
first_name: str
+
One or more alpha characters
+
last_name: str
+
One or more alpha characters
+
middle_name: str
+
One or more alpha characters
+
name_suffix: str
+
One or more alpha characters
+
latitude: float
+
Floating point number (e.g. 33.738987255507)
+
longitude: float
+
Floating point number (e.g. -116.40833849559)
+
address: str
+
Any geocode-able address
+
address_type: str
+
reg for registration (default) or tsmart for TargetSmart
+
radius_unit: str
+
One of meters, feet, miles (default), or kilometers.
+
max_results: int
+
Default of 10. An integer in range [0 - 100]
+
gender: str
+
Default of a. One of m, f, u, a.
+
age_min: int
+
A positive integer
+
age_max: int
+
A positive integer
+
composite_score_min: int
+
An integer in range [1 - 100]. Filter out results with composite score +less than this value.
+
composite_score_max: int
+
An integer in range [1 - 100]. Filter out results with composite score +greater than this value.
+
last_name_exact: boolean
+
By default, the full last name is used for finding matches if the length of the +last name is not longer than 10 characters. As an example, “anders” is less likely +to match to “anderson” with this enabled. Disable this option if you are using +either last_name_is_prefix or last_name_prefix_length.
+
last_name_is_prefix: boolean
+
By default, the full last name is used for finding matches. Enable this parameter +if your search last name is truncated. This can be common for some client +applications that for various reasons do not have full last names. Use this +parameter along with last_name_prefix_length to configure the length of the last +name prefix. This parameter is ignored if last_name_exact is enabled.
+
last_name_prefix_length: int
+
By default, up to the first 10 characters of the search last name are used for +finding relative matches. This value must be between 3 and 10. This parameter is +ignored if last_name_exact is enabled.
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
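+
+A minimal usage sketch (assuming ts is an instantiated TargetSmartAPI object; keyword
+names follow the argument list above and the values are illustrative):
+
+matches = ts.radius_search(first_name='Billy', last_name='Burchard',
+                           address='130 W Monroe St, Chicago, IL',
+                           radius_unit='miles', max_results=10)
+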
+
+voter_registration_check(first_name=None, last_name=None, state=None, street_number=None, street_name=None, city=None, zip_code=None, age=None, dob=None, phone=None, email=None, unparsed_full_address=None, obj_type='dict')Âś
+

Searches for a registered individual, returns matches.

+

A search must include, at a minimum, first name, last name and state.

+
+
Args:
+
+
first_name: str
+
Required; One or more alpha characters. Trailing wildcard allowed
+
last_name: str
+
Required; One or more alpha characters. Trailing wildcard allowed
+
state: str
+
Required; Two character state code (e.g. NY)
+
street_number: str
+
Optional; One or more alpha characters. Trailing wildcard allowed
+
street_name: str
+
Optional; One or more alpha characters. Trailing wildcard allowed
+
city: str
+
Optional; The person’s home city
+
zip_code: str
+
Optional; Numeric characters. Trailing wildcard allowed
+
age: int
+
Optional; One or more integers. Trailing wildcard allowed
+
dob: str
+
Numeric characters in YYYYMMDD format. Trailing wildcard allowed
+
phone: str
+
Integer followed by 0 or more * or integers
+
email: str
+
Alphanumeric character followed by 0 or more * or legal characters +(alphanumeric, @, -, .)
+
unparsed_full_address: str
+
One or more alphanumeric characters. No wildcards.
+
+
+
Returns
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
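+
+A minimal usage sketch (assuming ts is an instantiated TargetSmartAPI object; the
+person and address are illustrative):
+
+result = ts.voter_registration_check(first_name='Fakey', last_name='McFakerson',
+                                     state='CO',
+                                     unparsed_full_address='123 Main St, Denver, CO 80201')
+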
+ +
+
+

AutomationÂś

+

In order to instantiate the class you must pass valid kwargs or store the following +environmental variables:

+
    +
  • 'TS_SFTP_USERNAME'
  • 'TS_SFTP_PASSWORD'
+
+
+class parsons.TargetSmartAutomation(sftp_username=None, sftp_password=None)[source]Âś
+
+
+match(table, job_type, job_name=None, emails=None, call_back=None, remove_files=True)[source]Âś
+

Match a table to TargetSmart using their bulk matching service.

+
+

Warning

+
+
Table Columns
+
The automation job validates the file by column indexes +rather than column names. So, if it expects 10 columns and you +only provide 9, it will fail. However, if you provide 10 columns that +are out of order, the job will succeed, but the records will not +match.
+
+
+
+
Args:
+
+
table: Parsons Table Object
+
A table object with the required columns. (Required columns provided by TargetSmart)
+
job_type: str
+
The match job type. This is case sensitive. (Match job names provided by TargetSmart)
+
job_name: str
+
Optional job name.
+
emails: list
+
A list of emails that will receive status notifications. This +is useful in debugging failed jobs.
+
call_back: str
+
A callback url to which the status will be posted. See +TargetSmart documentation +for more details.
+
remove_files: boolean
+
Remove the configuration, file to be matched and matched file from +the TargetSmart FTP upon completion or failure of match.
+
+
+
+
+ +
+ +
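+
+A minimal usage sketch (credentials are read from the TS_SFTP_USERNAME and
+TS_SFTP_PASSWORD environment variables; the file name, job type and email are
+illustrative and must match your TargetSmart configuration):
+
+from parsons import Table, TargetSmartAutomation
+
+ts_auto = TargetSmartAutomation()
+tbl = Table.from_csv('to_match.csv')  # Columns must match the TargetSmart spec
+matched = ts_auto.match(tbl, 'job_type_name', emails=['me@example.org'])
+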
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/turbovote.html b/docs/html/turbovote.html new file mode 100644 index 0000000000..6e6dfdc691 --- /dev/null +++ b/docs/html/turbovote.html @@ -0,0 +1,338 @@ + + + + + + + + + + + TurboVote — Parsons 0.5 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

TurboVoteÂś

+
+

OverviewÂś

+

TurboVote is an online voter registration and vote by mail +tool. This class contains a single method which allows you to export your users +(aka signups).

+
+

Note

+
+
Authentication
+
TurboVote requires HTTP Basic Auth. +Clients with a TurboVote account must pass their username, password, and subdomain.
+
+
+
+
+

QuickStartÂś

+

To instantiate the TurboVote class, you can either store your TurboVote API +username, password, subdomain as environmental variables (TURBOVOTE_USERNAME, +TURBOVOTE_PASSWORD, and TURBOVOTE_SUBDOMAIN, respectively) or pass them +in as arguments:

+
from parsons import TurboVote
+
+# First approach: Pass credentials via environmental variables.
+tv = TurboVote()
+
+# Second approach: Pass credentials as arguments.
+tv = TurboVote(username='me', password='pass', subdomain='myorg')
+
+
+

You can then call the method:

+
# Get users
+tv.get_users()
+
+
+
+
+

APIÂś

+
+
+class parsons.TurboVote(username=None, password=None, subdomain=None)[source]Âś
+

Instantiate the TurboVote class

+
+
Args:
+
+
username: str
+
A valid TurboVote username. Not required if TURBOVOTE_USERNAME +env variable set.
+
password: str
+
A valid TurboVote password. Not required if TURBOVOTE_PASSWORD +env variable set.
+
subdomain: str
+
Your TurboVote subdomain (e.g. https://MYORG.turbovote.org). Not +required if TURBOVOTE_SUBDOMAIN env variable set.
+
+
+
Returns:
+
class
+
+
+
+get_users()[source]Âś
+

Get users.

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/html/van.html b/docs/html/van.html new file mode 100644 index 0000000000..7bf0858f59 --- /dev/null +++ b/docs/html/van.html @@ -0,0 +1,1729 @@ + + + + + + + + + + + VAN — Parsons 0.1 documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + +
+ + + + +
+
+
+
+ +
+

VANÂś

+
+

OverviewÂś

+

The VAN module leverages the VAN API and generally follows the naming convention of their API endpoints. It +is recommended that you reference their API documentation for +additional details and information.

+
+

Note

+
+
API Keys
+
    +
  • VAN API Keys are required to use this module.
  • API Keys are specific to each committee and state, so you might need many.
  • Not all API Keys are provisioned for all end points. You should contact VAN if you need access.
  • VAN typically offers a cap of free API calls per day. API calls which exceed the free limit +incur a cost.
+
+
+
+
+

Warning

+
+
VANIDs
+
VANIDs are unique to each state and instance of the VAN. VANIDs used for the AV VAN will not match +those of the SmartVAN.
+
+
+
+
+
+
+

QuickStartÂś

+

To instantiate the VAN class, you can either store the api key as an environmental variable VAN_API_KEY or pass it in as an argument.

+
from parsons import VAN
+
+ van = VAN(db='MyVoters') # Instantiate class via environmental variable api key
+
+ van = VAN(api_key='asdfa-sdfadsf-adsfasdf',db='MyVoters') # Pass api key directly
+
+
+

You can then call various endpoints:

+
from parsons import VAN
+
+ van = VAN(db='MyVoters')
+
+ # List events with a date filter
+ events = van.events(starting_before='2018-02-01')
+
+ # List all folders shared with API Key User
+ folders = van.folders()
+
+ # Return a dataframe rather than a list of dicts
+ saved_lists = van.saved_lists().to_df()
+
+
+

This is just a small sampling of all of the VAN endpoints that you can leverage. We recommend reviewing the +documentation for all functions.

+
+
+

Common WorkflowsÂś

+
+

Score: Loading and UpdatingÂś

+

Loading a score is a multi-step process. Once a score is set to approved, loading takes place +overnight.

+
from parsons import VAN
+import json
+
+# Instantiate class
+van = VAN(db="MyVoters")
+
+# List all of the scores / slots
+print(json.dumps(van.scores(), indent=4))
+
+# Input the score slot id
+score_slot_id = 34115
+
+# Load the score file to VAN
+r = van.file_load('score.csv',
+              'https://box.com/scores.zip',
+              ['vanid','myscore'],
+              'vanid',
+              'VANID',
+               score_slot_id,
+               'myscore',
+               email='anemailaddress@gmail.com')
+
+
+# Update Status - The email that you get when it is loaded will include a score update
+# id. Pass this to approve the score to be loaded.
+#   - Might take a few minutes to get the email
+#   - Email will also include some nice stats to QC, included matched rows
+
+van.score_update_status(47187,'approved') # Pass the score update id and set to approved
+
+
+
+
+

People: Add Survey ResponseÂś

+

The following workflow can be used to apply survey questions, activist codes, +and canvass responses.

+
from parsons import VAN
+
+# Instantiate class
+van = VAN(db="MyVoters")
+
+sq_id = 311838 # Valid survey question id
+sr_id = 1288926 # Valid survey response id
+ct_id = 36 # Valid contact type id
+it_id = 4 # Valid input type id
+dwid = 999999999 # A valid dwid primary key for the person record (illustrative)
+
+# Create a valid survey question response
+sq_response = van.people_sq_response(sq_id, sr_id)
+van.people_canvass_response(dwid,
+                            key_type='dwid',
+                            contact_type_id=ct_id,
+                            input_type_id=it_id,
+                            responses=sq_response)
+
+
+
+
+

Event: Creating and ModifyingÂś

+

Events are made up of sub objects that need to exist to create an event

+
    +
  • Event Object - The event itself
  • Event Type - The type of event, such as a Canvass or Phone Bank. These are created +in the VAN UI and can be reused for multiple events.
  • Locations - An event can have multiple locations. While not required to initially create an +event, these are required to add signups to an event.
  • Roles - The various roles that a person can have at an event, such as Lead or +Canvasser. These are set as part of the event type.
  • Shifts - Each event can have multiple shifts to which a person can be assigned. These are +specified in the event creation.
+
from parsons import VAN
+
+# Instantiate class
+van = VAN(db="EveryAction")
+
+# Create A Location
+loc_id = van.location_create(name="Big Ol' Canvass", address_line1='100 W Washington', city='Chicago', state='IL')
+
+# Create Event
+name = 'GOTV Canvass' # Name of event
+short_name = 'GOTVCan' # Short name of event, 12 chars or less
+start_time = '2018-11-01T15:00:00' # ISO formatted date
+end_time = '2018-11-01T18:00:00' # ISO formatted date after start time
+event_type_id = 296199 # A valid event type id
+roles = [259236] # A list of valid role ids
+location_ids = [loc_id] # An optional list of locations ids for the event
+description = 'CPD Super Volunteers Canvass' # Optional description of 200 chars or less
+shifts = [{'name': 'Shift 1',
+           'start_time': '2018-11-01T15:00:00',
+           'end_time': '2018-11-01T17:00:00'}] # Shifts must fall within event start/end time.
+
+new_event = van.event_create(name, short_name, start_time, end_time, event_type_id, roles,
+                             location_ids=location_ids, shifts=shifts, description=description)
+
+
+
+
+

Signup: Adding and ModifyingÂś

+
from parsons import VAN
+
+# Instantiate class
+van = VAN(db="EveryAction")
+
+# Create a new signup
+
+vanid = 100349920
+event_id = 750001004
+shift_id = 19076
+role_id = 263920
+location_id = 3
+status_id = 11
+
+# Create the signup. Will return a signup id
+signup_id = van.signup_create(vanid, event_id, shift_id, role_id, status_id, location_id)
+
+# Modify a status of the signup
+new_status_id = 6
+van.signup_update(signup_id, status_id=new_status_id)
+
+
+
+
+
+

PeopleÂś

+
+
+class parsons.ngpvan.van.People(van_connection)[source]Âś
+
+
+people_find(first_name=None, last_name=None, dob=None, email=None, phone=None, street_number=None, street_name=None, zip=None)[source]Âś
+

Search for a person record

+
+

Note

+

Person find must include the following minimum combinations to conduct +a search.

+
    +
  • first_name, last_name, email
  • first_name, last_name, phone
  • first_name, last_name, zip5, dob
  • first_name, last_name, street_number, street_name, zip5
  • email_address
+
+
+
Args:
+
+
first_name: str
+
Required; The person’s first name
+
last_name: str
+
Required; The person’s last name
+
dob: str
+
ISO 8601 formatted date of birth
+
email: str
+
The person’s email address
+
phone: str
+
Phone number of any type (Work, Cell, Home)
+
street_number: str
+
Street Number
+
street_name: str
+
Street Name
+
zip: str
+
5 digit zip code
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
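+
+A minimal usage sketch (the person details are illustrative and satisfy the
+first_name/last_name/phone combination above):
+
+from parsons import VAN
+
+van = VAN(db='MyVoters')
+person = van.people_find(first_name='Bob', last_name='Smith', phone='4142020792')
+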
+
+people_get(primary_key, primary_key_type='vanid', addresses=False, emails=False, phones=False, custom_fields=False, external_ids=False, recorded_addresses=False, preferences=False, suppressions=False, reported_demographics=False, disclosure_field_values=False)[source]Âś
+

Returns a single person record

+
+
Args:
+
+
primary_key: int
+
The primary key of the person.
+
primary_key_type:
+
The type of primary key. Options may vary by VAN instance, but +vanid will always be a valid option.
+
addresses: boolean
+
Return all addresses
+
emails: boolean
+
Return all email addresses
+
phones: boolean
+
Return all phone numbers
+
custom_fields: boolean
+
Return all custom fields
+
external_ids: boolean
+
Return all external ids
+
recorded_addresses: boolean
+
Return all recorded addresses
+
preferences: boolean
+
Return all preferences
+
suppressions: boolean
+
Return all suppressions
+
reported_demographics: boolean
+
Return all reported demographics
+
disclosure_field_values: boolean
+
Return all disclosure field values
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
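+
+A minimal usage sketch (assuming van is an instantiated VAN object; the vanid is
+illustrative):
+
+person = van.people_get(19722445, addresses=True, phones=True)
+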
+
+people_sq_response(survey_question_id, survey_response_id)[source]Âś
+

Creates a survey question response to apply to a person +record. A helper function to generate responses to include in +people_canvass_response() function.

+
+
Args:
+
+
survey_question_id: int
+
A valid survey question id
+
survey_response_id: int
+
A valid survey response id for the survey question
+
+
+
Returns:
+
dict
+
+
+ +
+
+people_ac_response(activist_code_id, action)[source]Âś
+

Create an Activist Code response to apply or remove +activist codes from a person record. A helper function +to generate responses to include in people_canvass_response() +method.

+
+
Args:
+
+
activist_code_id: int
+
A valid activist code id
+
action: str
+
Either ‘apply’ or ‘remove’
+
+
+
Returns:
+
dict
+
+
+ +
+
+people_va_response(volunteer_activity_id, action)[source]Âś
+

Create a volunteer activity response to apply or remove +volunteer activities from a person record. A helper function to +generate responses to include in people_canvass_response() +function.

+
+
Args:
+
+
volunteer_activity_id: int
+
A valid volunteer activity id
+
action: str
+
Either ‘apply’ or ‘remove’
+
+
+
Returns:
+
dict
+
+
+ +
+
+people_canvass_response(primary_key, key_type='vanid', contact_type_id=None, input_type_id=None, date_canvassed=None, result_code_id=None, responses=None)[source]Âś
+

Applies a canvass response to a person record.

+
+
Args:
+
+
primary_key : str
+
A valid primary key for the person record
+
key_type : str
+
The type of primary key. Defaults to ‘vanid’, but depending +on your VAN instance, could be ‘dwid’, ‘statefileid’, etc.
+
result_code_id : int
+
Optional; Specifies the result code of the response. If +not included, responses must be specified. Conversely, if +responses are specified, result_code_id must be null.
+
contact_type_id : int
+
Optional; A valid contact type id
+
input_type_id : int
+
Optional; Defaults to 11 (API Input)
+
date_canvassed : str
+
Optional; ISO 8601 formatted date. Defaults to today's date
+
responses : list or dict
+
A set of response objects of volunteer activity +(people_va_response()), activist_codes +(people_ac_response()) or survey_questions +(people_sq_response()). You can mix and match as many as you +would like in the same call. If included, result_code_id must +be null.
+
+
+
Returns:
+
dict
+
+
+ +
+
+people_relationships(vanid_1, vanid_2, relationship_id)[source]Âś
+

Apply a relationship between two individuals

+
+
Args:
+
+
vanid_1 : int
+
The vanid of the primary individual; aka the node
+
vanid_2 : int
+
The vanid of the secondary individual; the spoke
+
relationship_id : int
+
The relationship id indicating the type of relationship
+
+
+
Returns:
+
Tuple of (204, No Content) if successful, otherwise (404, Not Found).
+
+
+ +
+ +
+
+

Activist CodesÂś

+
+
+class parsons.ngpvan.van.ActivistCodes(van_connection)[source]Âś
+

Class for ‘/activistCodes’ end points.

+
+
+activist_codes()[source]Âś
+

Return activist code objects

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+activist_code(activist_code_id)[source]Âś
+

Return a single activist code

+
+
Args:
+
+
activist_code_id : int
+
The activist code id associated with the activist code.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

Survey QuestionsÂś

+
+
+class parsons.ngpvan.van.SurveyQuestions(van_connection)[source]Âś
+
+
+survey_questions(statuses=None, name=None, sq_type=None, question=None, cycle=None, page_size=200)[source]Âś
+

Returns a list of survey questions. Use arguments to filter +the returned list.

+
+
Args:
+
+
statuses: str
+
Comma delimited list of statuses of Survey Questions. One or +more of Active (default), Archived, and Inactive.
+
name: str
+
Filters to Survey Questions with names that start with the +given input
+
sq_type: str
+
Filters to Survey Questions of the given type
+
question: str
+
Filters to Survey Questions with script questions that +contain the given input
+
cycle: str
+
A year in the format YYYY; filters to Survey Questions with +the given cycle
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
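+
+A minimal usage sketch (assuming van is an instantiated VAN object; the filter values
+are illustrative):
+
+sqs = van.survey_questions(statuses='Active', cycle='2018')
+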
+
+survey_question(survey_question_id)[source]Âś
+

Return a survey question

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

EventsÂś

+
+
+class parsons.ngpvan.van.Events(van_connection)[source]Âś
+

Class for ‘/events’ end points.

+
+
+events(code_ids=None, event_type_ids=None, rep_event_id=None, starting_after=None, starting_before=None, district_field=None, expand=['locations', 'codes', 'shifts', 'roles', 'notes'])[source]Âś
+

Returns a list of events. Use kwargs to filter event list.

+
+
Args:
+
+
code_ids: str
+
Filter by code id
+
event_type_ids: str
+
Filter by event_type_ids
+
rep_event_id: str
+
Filter by events in repetition with the given event id
+
starting_after: str
+
Events beginning after iso8601 formatted date.
+
starting_before: str
+
Events beginning before iso8601 formatted date.
+
district_field: str
+
Filter by district field
+
page_size: str
+
Not Implemented
+
expand : list
+
A list of nested jsons to include in returned event +object. Can be locations, codes, shifts, +roles, notes.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+event(event_id, expand=['locations', 'codes', 'shifts', 'roles', 'notes'])[source]Âś
+

Returns an event object

+
+
Args:
+
+
event_id : int
+
Event id for the event object
+
expand : list
+
A list of nested jsons to include in returned event +object. Can be locations, codes, shifts, +roles, notes.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+event_create(name, short_name, start_date, end_date, event_type_id, roles, shifts=None, description=None, editable=False, publicly_viewable=False, location_ids=None, code_ids=None, notes=None, district_field_value=None, voter_registration_batches=None)[source]Âś
+

Create an event

+
+
Args:
+
+
name: str
+
A name for this Event, no longer than 500 characters
+
short_name: str
+
A shorter name for this Event, no longer than 12 characters
+
start_date: str
+
A start date and time for this Event
+
end_date: str
+
An end date and time for this Event that is after start_date
+
event_type_id: int
+
A valid event type id
+
roles: list
+
A list of valid role ids that correspond the with the event type
+
shifts:
+

A list of dicts with shifts formatted as:

+
[
+    {
+     'name': 'Shift 1',
+     'start_time': '12-31-2018T12:00:00',
+     'end_time': '12-31-2018T13:00:00'
+    }
+    {
+     'name': 'Shift 2',
+     'start_time': '12-31-2018T13:00:00',
+     'end_time': '12-31-2018T14:00:00'
+    }
+]
+
+
+
+
description: str
+
An optional description for this Event, no longer than 500 characters
+
editable: boolean
+
If true, prevents modification of this Event by any users other than the +user associated with the API context. Setting this to true effectively makes +the Event read-only in the VAN interface.
+
publicly_viewable: boolean
+
Used by NGP VAN’s website platform to indicate whether this Event can be +viewed publicly.
+
location_ids: lst
+
A list of location_ids where the event is taking place
+
code_ids: lst
+
A list of codes that are applied to this Event for organizational purposes. Note +that at most one Source Code, and any number of Tags, may be applied to an Event.
+
notes:
+
A list of notes
+
+
+
Returns:
+
The event id
+
+
+ +
+
+event_delete(event_id)[source]Âś
+

Delete an event

+
+
Args:
+
+
event_id: int
+
Unique identifier for an editable Event
+
+
+
Returns:
+
None
+
+
+ +
+
+event_add_shift(event_id, shift_name, start_time, end_time)[source]Âś
+

Add shifts to an event

+
+
Args:
+
event_id: int
+
A valid event id
+
shift_name: str
+
The name of the shift
+
start_time: str
+
The start time of the shift (iso8601 formatted date)
+
end_time: str
+
The end time of the shift (iso8601 formatted date)
+
Returns:
+
None
+
+
+ +
+
+event_types()[source]Âś
+

Returns event types

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

LocationsÂś

+
+
+class parsons.ngpvan.van.Locations(van_connection)[source]Âś
+

Class for ‘/locations’ end points.

+
+
+locations(name=None)[source]Âś
+

List locations and optionally filter by name

+
+
Args:
+
+
name: str
+
Filters to Locations with names that contain the given input
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+location(location_id)[source]Âś
+

Return a location object

+
+
Args:
+
+
location_id: int
+
A valid location id
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+location_create(name, address_line1=None, address_line2=None, city=None, state=None, zipcode=None)[source]Âś
+

Find or create a location. If the location already exists, the location id will be returned.

+
+
Args:
+
+
name: str
+
A name for this Location, no longer than 50 characters
+
address_line1: str
+
Optional; First line of a Street Address
+
address_line2: str
+
Optional; Second line of a Street Address
+
city: str
+
Optional; City or Town
+
state: str
+
Optional; Two or three character State or Province code (e.g., MN, ON, NSW, etc.)
+
zipcode: str
+
Optional; ZIP, ZIP+4, Postal Code, Post code, etc.
+
Returns:
+
A location id
+
+
+
+
+ +
+
+location_delete(location_id)[source]Âś
+

Delete a location object

+
+
Args:
+
+
location_id:
+
A valid location id
+
+
+
Returns:
+
200: OK if successful and 404 Not Found if location not found
+
+
+ +
+ +
+
+

SignupsÂś

+
+
+class parsons.ngpvan.van.Signups(van_connection)[source]Âś
+

Class for ‘/signups’ end points.

+
+
+signups_statuses(event_id=None, event_type_id=None)[source]Âś
+

Return a list of valid signup statuses for a given event type +or event. You may pass either an event_id or an event_type_id, +but not both.

+
+
Args:
+
+
event_id: int
+
A valid event id
+
event_type_id: int
+
A valid event type id
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+person_signups(vanid)[source]Âś
+

Return the signup history of a person.

+
+
Args:
+
+
vanid: int
+
A valid vanid associated with a person
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+event_signups(event_id)[source]Âś
+

Return the signup history of an event.

+
+
Args:
+
+
event_id: int
+
A valid event_id associated with an event
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+signup_get(event_signup_id)[source]Âś
+

Return a single signup object.

+
+
Args:
+
+
event_signup_id: int
+
A valid event signup id associated with a signup.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+signup_create(vanid, event_id, shift_id, role_id, status_id, location_id)[source]Âś
+

Create a new signup for an event.

+
+
Args:
+
+
vanid: int
+
A valid vanid of the person to signup for the event.
+
event_id: int
+
A valid event_id to associate the person with the event
+
shift_id:
+
A shift_id, associated with the event to assign the person
+
role_id:
+
A role_id, associated with the event to assign the person
+
status_id:
+
A status_id of the person
+
location_id:
+
A location_id for the event
+
+
+
Returns:
+
+
Int
+
The event signup id
+
+
+
+
+ +
+
+signup_update(event_signup_id, shift_id=None, role_id=None, status_id=None, location_id=None)[source]Âś
+

Update a signup object. All of the kwargs will update the values associated +with them.

+
+
Args:
+
+
event_signup_id: int
+
A valid event signup id
+
shift_id: int
+
The shift_id to update
+
role_id: int
+
The role_id to update
+
status_id: int
+
The status_id to update
+
location_id: int
+
The location_id to update
+
+
+
Returns:
+
+
tuple
+
If successful (204, No Content)
+
+
+
+
+ +
+
+signup_delete(event_signup_id)[source]Âś
+

Delete a signup object

+
+
Args:
+
+
event_signup_id: int
+
A valid event signup id
+
+
+
Returns:
+
+
tuple
+
If successful (204, No Content)
+
+
+
+
+ +
+ +
+
+

CodesÂś

+
+
+class parsons.ngpvan.van.Codes(van_connection)[source]Âś
+

Class for ‘/codes’ end points.

+
+
+codes(name=None, supported_entities=None, parent_code_id=None, code_type=None, page_size=200)[source]Âś
+

Returns all codes

+
+
Args:
+
+
name : str
+
Optional; Filter by name of code
+
supported_entities : str
+
Optional; Filter by supported entities
+
parent_code_id : str
+
Optional; Filter by parent code id
+
code_type : str
+
Optional; Filter by code type
+
page_size : int
+
Optional; Not implemented
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
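+
+A minimal usage sketch (assuming van is an instantiated VAN object; the code type
+value is illustrative and must be valid for your VAN instance):
+
+# List all codes of a given type
+codes = van.codes(code_type='sourcecode')
+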
+
+code(code_id)[source]Âś
+

Returns a single code object

+
+
Args:
+
+
code_id : int
+
Code id for the code object
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+code_types()[source]Âś
+

Returns code types

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+code_create(parent_code_id=None, name=None, code_type=None, supported_entities=None)[source]Âś
+

Create a code

+
+ +
+
+code_delete(code_id)[source]Âś
+

Delete a code

+
+ +
+
+code_supported_entities()[source]Âś
+

List code supported entities

+
+ +
+ +
+
+

Canvass ResponsesÂś

+
+
+class parsons.ngpvan.van.CanvassResponses(van_connection)[source]Âś
+
+
+canvass_responses_contact_types()[source]Âś
+

List canvass response contact types

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+canvass_responses_input_types()[source]Âś
+

List canvass response input types

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+canvass_responses_result_codes()[source]Âś
+

List canvass response result codes

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

Saved ListsÂś

+
+

Note

+

A saved list must be shared with the user associated with your API key to +be listed.

+
+
+
+class parsons.ngpvan.van.SavedLists(van_connection)[source]Âś
+
+
+saved_lists(folder_id=None)[source]Âś
+

Returns all saved lists

+
+
Args:
+
+
folder_id : int
+
Optional; the id for a VAN folder. If included returns only +the saved lists in the folder
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+saved_list(saved_list_id)[source]Âś
+

Returns a single saved list object

+
+
Args:
+
+
saved_list_id : int
+
The saved list id associated with the saved list.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+saved_list_download(saved_list_id)[source]Âś
+

Download a saved list

+
+
Args:
+
+
saved_list_id
+
The saved list id associated with the saved list.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
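+
+A minimal usage sketch (assuming van is an instantiated VAN object; the saved list id
+is illustrative):
+
+saved_list = van.saved_list_download(517612)
+
+# Write the downloaded list to a csv
+saved_list.to_csv('saved_list.csv')
+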
+ +
+
+

FoldersÂś

+
+

Note

+

A folder must be shared with the user associated with your API key to +be listed.

+
+
+
+class parsons.ngpvan.van.Folders(van_connection)[source]Âś
+
+
+folders()[source]Âś
+

List all folders

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+folder(folder_id)[source]Âś
+

Get a single folder

+
+
Args:
+
+
folder_id: int
+
The folder id associated with the folder.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

Export JobsÂś

+
+
+class parsons.ngpvan.van.ExportJobs(van_connection)[source]Âś
+
+
+export_job_types()[source]Âś
+

Lists export job types

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+export_job_create(list_id, export_type=4, webhookUrl='https://www.nothing.com')[source]Âś
+

Creates an export job

+

Currently, this is only used for exporting saved lists. It is +recommended that you use the saved_list_download() method +instead.

+
+
Args:
+
+
list_id: int
+
The id of the saved list to export
+
export_type: int
+
The export type id, which defines the columns to export
+
webhookUrl:
+
A webhook url used to send notifications about the status of the export
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options. Includes a +download link for the file.
+
+
+
+
+ +
+
+export_job(export_job_id)[source]Âś
+

Returns a single export job

+
+
Args:
+
+
export_job_id: int
+
Export job id
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

ScoresÂś

+

Prior to loading a score for the first time, you must contact VAN support to request +a score slot.

+
+

Note

+
+
Score Auto Approval
+
Scores can be automatically set to approved through the file_load() function, allowing +you to skip calling the score_update_status() function. Scores are automatically approved +if the average of the scores is within the fault tolerance specified by the user. It +is only available to API keys with permission to automatically approve scores.
+
+
+
+
+class parsons.ngpvan.van.Scores(van_connection)[source]Âś
+
+
+scores()[source]Âś
+

Returns all scores

+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+score(score_id)[source]Âś
+

Returns an individual score

+
+
Args:
+
+
score_id: int
+
The score id
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+ +
+
+

Score UpdatesÂś

+
+
+class parsons.ngpvan.van.ScoreUpdates(van_connection)[source]Âś
+
+
+score_updates(created_before=None, created_after=None, score_id=None)[source]Âś
+

Returns all score updates

+
+
Args:
+
+
created_before: str
+
Optional; Filter score updates to those created before date. Use “YYYY-MM-DD” +format.
+
created_after: str
+
Optional; Filter score updates to those created after date. Use “YYYY-MM-DD” +format.
+
score_id: int
+
Optional; Filter score updates to those associated with a given score id.
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+ +
+
+score_update(score_update_id)[source]Âś
+

Returns a score update object

+
+
+
Args:
+
+
score_update_id : int
+
The score update id
+
+
+
Returns:
+
+
Parsons Table
+
See Parsons Table for output options.
+
+
+
+
+
+ +
+
+score_update_status(score_update_id, status)[source]Âś
+

Change the status of a score update object. This end point is used to +approve a score loading job.

+
+
Args:
+
+
score_update_id: str
+
The score update id
+
status: str
+
One of ‘pending approval’, ‘approved’, ‘disapproved’
+
+
+
Returns:
+
+
boolean
+
True if the status update is accepted
+
+
+
+
+ +
+ +
+
+

File Loading JobsÂś

+
+
+class parsons.ngpvan.van.FileLoadingJobs(van_connection)[source]Âś
+
+
+file_load(file_name, file_url, columns, id_column, id_type, score_id, score_column, delimiter='csv', header=True, description=None, email=None, auto_average=None, auto_tolerance=None)[source]Âś
+

Loads a file. Only used for loading scores at this time. Scores must be +compressed using zip.

+
+
Args:
+
+
file_name: str
+
The name of the file contained in the zip file.
+
file_url: str
+
The url path to directly download the file. Can also be a path to an FTP site.
+
columns: list
+
A list of column names contained in the file.
+
id_column: str
+
The column name of the id column in the file.
+
id_type: str
+
A valid primary key, such as VANID or DWID. Varies by VAN instance.
+
score_id: int
+
The score slot id
+
score_column: str
+
The column holding the score
+
delimiter: str
+
The file delimiter used.
+
email: str
+
A valid email address to which file loading status will be sent.
+
auto_average: float
+
The average of scores to be loaded.
+
auto_tolerance: float
+
The fault tolerance of the VAN calculated average compared to the auto_average. +The tolerance must be less than 10% of the difference between the maximum and +minimum possible acceptable values of the score.
+
+
+
Returns:
+
+
dict
+
The file load id
+
+
+
+
+ +
+
+file_load_multi(file_name, file_url, columns, id_column, id_type, score_map, delimiter='csv', header=True, description=None, email=None)[source]Âś
+

A variant of the file_load() method that allows you to load multiple scores +at the same time.

+
+
Args:
+
+
file_name : str
+
The name of the file contained in the zip file.
+
file_url : str
+
The url path to directly download the file. Can also be a path to an FTP site.
+
columns: list
+
A list of column names contained in the file.
+
id_column : str
+
The column name of the id column in the file.
+
id_type : str
+
A valid primary key, such as VANID or DWID. Varies by VAN instance.
+
score_map : list
+

A list of dicts that adheres to the following syntax

+
[{'score_id' : int,
+  'score_column': str,
+  'auto_average': float,
+  'auto_tolerance': float }]
+
+
+
+
email: str
+
A valid email address to which file loading status will be sent.
+
+
+
Returns:
+
The file load job id
+
+
+ +
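+
+A minimal usage sketch (assuming van is an instantiated VAN object; the score slot
+ids, file details and tolerances are illustrative):
+
+score_map = [{'score_id': 34115, 'score_column': 'myscore',
+              'auto_average': 0.50, 'auto_tolerance': 0.02},
+             {'score_id': 34116, 'score_column': 'myotherscore',
+              'auto_average': 0.50, 'auto_tolerance': 0.02}]
+
+job_id = van.file_load_multi('scores.csv', 'https://box.com/scores.zip',
+                             ['vanid', 'myscore', 'myotherscore'], 'vanid', 'VANID',
+                             score_map, email='anemailaddress@gmail.com')
+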
+ +
+
+ + +
+ +
+ + +
+
+ +
+ +
+ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/docs/index-redirect.html b/docs/index-redirect.html deleted file mode 100644 index c21060dc6e..0000000000 --- a/docs/index-redirect.html +++ /dev/null @@ -1,9 +0,0 @@ - - - - Redirecting to latest release - - - - - \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index c3a56f3717..7621ebcc73 100755 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,12 +3,6 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -.. image:: /_static/parsons_logo.png - :width: 250px - :height: 250px - :alt: Parsons logo - :align: center - About ===== @@ -23,23 +17,15 @@ The Movement Cooperative is a member led organization focused on providing data, License and Usage ================= -Usage of Parsons is governed by a `modified Apache License with author attribution statement `_. +Usage of Parsons is governed by the `TMC Parsons License `_, a modified Apache License with author attribution statement. -Resources -========= -* Documentation: ``_ -* Source Code: ``_ -* Project Website: ``_ -* Docker Image: ``_ - -Installation +Design Goals ============ +The goal of Parsons is to make the movement of data between systems as easy and straightforward as possible. Simply put, we seek to reduce the lines of code that are written by the progressive community. Not only is this a waste of time, but we rarely have the capacity and resources to fully unittest our scripts. -You can install Parsons using ``pip install parsons``. We recommend using a `virtual environment `_. - -Need more detail? We have a `detailed, beginner-friendly guide to installing Parsons `_ on our website. +.. image:: /_static/parsons_diagram.png -We also have a Parsons Docker container hosted on `DockerHub `_ for each release of Parsons. +Parsons seeks to be flexible from a data ingestion and output perspective, while providing ETL tools that recognize that our data is **always** messy. Central to this concept is the :ref:`parsons-table` the table-like object that most methods return. QuickStart ========== @@ -80,13 +66,73 @@ QuickStart ts = TargetSmart(api_key='MY_KEY') record = ts.data_enhance(231231231, state='DC') -Design Goals +Sources +======= +* Documentation: ``_ +* Source Code: ``_ + +Virtual Environments +==================== + +Normally, tools like `pip` install Python libraries directly to your system. Python scripts +or libraries look for their dependencies in your system. This can cause problems when you +have two scripts/libraries installed that require different versions of the same library. + +To solve this problem, we recommend you use *virtual environments* to install Parsons. +Virtual environments allow you to keep different sets of installed libraries so that you can +use different versions of the same libraries for different purposes. + +Windows +------------------------------ + +`Source `_ + +1. Install virtualenvwrappers from source:: + + git clone git://github.com/davidmarble/virtualenvwrapper-win.git + cd virtualenvwrapper-win + python setup.py install + +2. Find the ``Scripts\`` directory for your Python installation, such as ``C:\Users\\AppData\Local\Programs\Python\Python37\Scripts\``. + +3. Add the ``Scripts\`` directory to your Path. + +4. To create a virtual environment for Parsons, execute: ``mkvirtualenv parsons`` + +5. 
To use this virtual environment, execute: ``workon parsons`` + + +Linux / Mac OS +------------------------------ + + + +Installation ============ -The goal of Parsons is to make the movement of data between systems as easy and straightforward as possible. Simply put, we seek to reduce the lines of code that are written by the progressive community. Not only is this a waste of time, but we rarely have the capacity and resources to fully unittest our scripts. -.. image:: /_static/parsons_diagram.png +There are two ways to install Parsons: Using pip and from source. Use pip if you just want to install Parsons and start using it. Install from source if you might want to patch Parsons to customize its behavior. -Parsons seeks to be flexible from a data ingestion and output perspective, while providing ETL tools that recognize that our data is **always** messy. Central to this concept is the :ref:`parsons-table` the table-like object that most methods return. +Installing Parsons (pip) +----------------------------- + +1. Make sure you're in your parsons virtual environment: ``workon parsons``. + +2. Execute: ``pip install parsons`` + +Installing Parsons from Source +---------------------------------- + + +1. Make sure you're in your parsons virtual environment: ``workon parsons``. + +2. Execute these commands:: + + git clone https://github.com/move-coop/parsons.git + pip install -r requirements.txt + python setup.py install + + +3. To update your installation, pull the most recent branch from the Parsons GitHub repo (``git pull origin master``) and rerun ``python setup.py install``. Logging ======= @@ -106,65 +152,65 @@ In your scripts that use Parsons, if you want to override the default Parsons lo # parsons_logger.addHandler(...) # parsons_logger.setFormatter(...) -Integrating Parsons -=================== +Minimizing Resource Utilization +=============================== -A primary goal of Parsons is to make installing and use as easy as possible. Many of the patterns -and examples that we document are meant to show how easy it can be to use Parsons, but sometimes -these patterns trade immediate accessibility against ease of integration. +A primary goal of Parsons is to make installing and using the library as easy as possible. Many +of the patterns and examples that we document are meant to show how easy it can be to use Parsons, +but sometimes these patterns trade accessibility for performance. -In environments where Parsons is not the primary application, or in scenarios where Parsons must -run with limited resources, we recommend users install only the dependencies they need at loose -version constraints. To do this, simply set two environment variables before installing Parsons -and keep one while running: +In environments where efficiency is important, we recommend users take the following steps to +minimize resource utilization: -``` -export PIP_NO_BINARY=parsons -export PARSONS_LIMITED_DEPENDENCIES=true -pip install parsons -``` + 1. Don't import classes from the root Parsons package + 2. Install only the dependencies you need -``` -export PARSONS_LIMITED_DEPENDENCIES=true -python myparsons_script.py -``` +*** Don't import from the root Parsons package *** -`PIP_NO_BINARY` tells pip to use the source distribution of Parsons, which then allows -`PARSONS_LIMITED_DEPENDENCIES` to dynamically limit to the bare minimum dependencies needed to -run Parsons. Users may also install extra dependencies appropriate to their environment, e.g. 
+Throughout the Parsons documentation, users are encouraged to load Parsons classes like so: -``` -export PIP_NO_BINARY=parsons -export PARSONS_LIMITED_DEPENDENCIES=true -pip install parsons[google] +```python +from parsons import Table ``` -or +In order to support this pattern, Parsons imports all of its classes into the root `parsons` +package. Due to how Python loads modules and packages, importing even one Parsons class results +in ALL of them being loaded. In order to avoid the resource consumption associated with loading all +of Parsons, we have created a mechanism to skip loading of call of the Parsons classes. -``` -export PIP_NO_BINARY=parsons -export PARSONS_LIMITED_DEPENDENCIES=true -pip install parsons[google,ngpvan] -``` +If you set `PARSONS_SKIP_IMPORT_ALL` in your environment, Parsons will not import all of its classes +into the root `parsons` package. Setting this environment variable means you will **NOT** be able to +import using the `from parsons import X` pattern. Instead, you will need to import directly from the +package where a class is defined (e.g. `from parsons.etl import Table`). +If you use the `PARSONS_SKIP_IMPORT_ALL` and import directly from the appropriate sub-package, +you will only load the classes that you need and will not consume extra resources. Using this +method, you may see as much as an 8x decrease in memory usage for Parsons. -*** Don't import from the root Parsons package *** +*** Install only the dependencies you need *** -Throughout the Parsons documentation, users are encouraged to load Parsons classes like so: +Since Parsons needs to talk to so many different API's, it has a number of dependencies on other +Python libraries. It may be preferable to only install those external dependencies that you will +use. -.. code-block:: python +For example, if you are running on Google Cloud, you might not need to use any of Parsons' AWS +connectors. If you don't use any of Parsons' AWS connectors, then you won't need to install the +Amazon Boto3 library that Parsons uses to access the Amazon APIs. - from parsons import Table +By default, installing Parsons will install all of its external dependencies. You can prevent +these dependencies from being installed with Parsons by passing the `--no-deps` flag to pip +when you install Parsons. -In order to support this pattern, Parsons imports all of its classes into the root `parsons` -package. Due to how Python loads modules and packages, importing even one Parsons class results -in ALL of them being loaded. The `PARSONS_LIMITED_DEPENDENCIES` variable tells Parsons to skip -this; it will not import all of its classes into the root `parsons` package. Setting this -environment variable means you will **NOT** be able to import using the `from parsons import X` -pattern. Instead, you will need to import directly from the package where a class is defined -(e.g. `from parsons.etl import Table`). Using this method, you may see as much as an 8x -decrease in memory usage for Parsons! +``` +> pip install --no-deps parsons +``` + +Once you have Parsons installed without these external dependencies, you can then install +the libraries as you need them. You can use the requirements.txt as a reference to figure +out which version you need. 
At a minimum you will need to install the following libraries +for Parsons to work at all: +* petl Indices and tables ================== @@ -180,11 +226,9 @@ Indices and tables actblue action_kit - action_builder action_network airtable alchemer - auth0 aws azure bill_com @@ -192,13 +236,11 @@ Indices and tables bluelink box braintree - capitolcanary civis controlshift copper crowdtangle databases - donorbox facebook_ads freshdesk github @@ -207,16 +249,14 @@ Indices and tables mailchimp mobilize_america mobilecommons - nation_builder newmode ngpvan - p2a pdi + p2a quickbase redash rockthevote salesforce - scytl sftp shopify sisense @@ -256,14 +296,4 @@ Indices and tables :caption: Use Cases and Sample Scripts :name: use_cases_and_sample_scripts - use_cases/contribute_use_cases - use_cases/civis_job_status_slack_alert - use_cases/mysql_to_googlesheets - -.. toctree:: - :maxdepth: 1 - :caption: Training Guides - :name: training_guides - - training_guides/getting_set_up - training_guides/etl_best_practices + use_cases/contribute_use_cases \ No newline at end of file diff --git a/docs/mobilize_america.rst b/docs/mobilize_america.rst index 7807b46388..0f6b8ebabc 100644 --- a/docs/mobilize_america.rst +++ b/docs/mobilize_america.rst @@ -22,7 +22,6 @@ Quickstart If you instantiate ``MobilizeAmerica`` without an API Key, you can only use public methods: .. code-block:: python - from parsons import MobilizeAmerica # Instantiate class without API key @@ -36,7 +35,6 @@ In order to use private methods, you must provide an API key either by setting t variable ``MOBILIZE_AMERICA_API_KEY`` or by passing an ``api_key`` argument as shown below: .. code-block:: python - # Instantiate class without API key as argument ma = MobilizeAmerica(api_key='my_api_key') diff --git a/docs/nation_builder.rst b/docs/nation_builder.rst deleted file mode 100644 index 649f52dace..0000000000 --- a/docs/nation_builder.rst +++ /dev/null @@ -1,52 +0,0 @@ -NationBuilder -============== - -******** -Overview -******** - -The NationBuilder class allows you to interact with the NationBuilder API. Users of this Parsons integration can download a full list of people, update and upsert people. - -.. note:: - Authentication - In order to use this class you need your nation slug and access token. To get your access token login to your nation and navigate to ``Settings > Developer > API Token`` and create a new token. You can get more info in the `NationBuilder API docs `_. - -========== -Quickstart -========== - -To instantiate the NationBuilder class, you can either store your ``NB_SLUG`` and ``NB_ACCESS_TOKEN`` keys as environment -variables or pass them in as arguments: - -.. code-block:: python - - from parsons import NationBuilder - - # First approach: Use API key environment variables - - # In bash, set your environment variables like so: - # export NB_SLUG='my-nation-slug' - # export NB_ACCESS_TOKEN='MY_ACCESS_TOKEN' - nb = NationBuilder() - - # Second approach: Pass API keys as arguments - nb = NationBuilder(slug='my-nation-slug', access_token='MY_ACCESS_TOKEN') - -You can then make a request to get all people and save its data to a Parsons table using the method, ``get_people()``: - -.. code-block:: python - - # Create Parsons table with people data from API - parsons_table = nb.get_people() - - # Save people as CSV - parsons_table.to_csv('people.csv') - -The above example shows how to create a Parsons table with all people registered in your NationBuilder nation. - -*** -API -*** - -.. 
autoclass :: parsons.NationBuilder - :inherited-members: diff --git a/docs/ngpvan.rst b/docs/ngpvan.rst index 6c8f6d2531..69d4c69395 100755 --- a/docs/ngpvan.rst +++ b/docs/ngpvan.rst @@ -22,10 +22,9 @@ additional details and information. VANIDs VANIDs are unique to each state and instance of the VAN. VANIDs used for the AV VAN **will not** match those of the SmartVAN or VoteBuilder. - - Maintenance & Support + Maintenance & Suppoort VAN/EveryAction is not responsible for support of Parsons. Their support team cannot answer questions - about Parsons. Please direct any questions to the Parsons team via the issue tracker or Slack. + about Parsons. Please direct any questions .. toctree:: :maxdepth: 1 @@ -395,16 +394,6 @@ Locations ========= .. autoclass:: parsons.ngpvan.van.Locations :inherited-members: - -=========== -Printed Lists -=========== -.. note:: - A printed list must be shared with the user associated with your API key to - be listed. - -.. autoclass:: parsons.ngpvan.van.PrintedLists - :inherited-members: =========== Saved Lists diff --git a/docs/notifications.rst b/docs/notifications.rst index c07ef340d4..f9a98402ee 100755 --- a/docs/notifications.rst +++ b/docs/notifications.rst @@ -143,82 +143,3 @@ API *** .. autoclass:: parsons.Gmail :inherited-members: - -========== -SMTP -========== - - -******** -Overview -******** - -The SMTP module enables the sending of email through a generic SMTP server. If you have an email server other -than Gmail this is likely the best way to send emails with Parsons. - -.. note:: - Credentials - - Credentials are required to use the class. You'll need to provide a valid username and password for - the SMTP server you are using. - -.. toctree:: - :maxdepth: 1 - -********** -QuickStart -********** - -To initialize the SMTP class you will need to tell it how to connect to the SMTP server: - - -.. code-block:: python - - from parsons import SMTP - - smtp = SMTP( - host="fake.host.com", - port=9999, - username="my_username", - password="dont_use_this_password" - ) - -.. note:: - Environment Variables - - Instead of passing in values to initialize an instance of the SMTP class, you can set environment - variables to hold the values. The names of the environment variables are the names of the arguments - capitalized and prefixed with ``SMTP_``. For example, ``SMTP_HOST`` or ``SMTP_PASSWORD``. If both - an environment variable and an initialization argument are present, the argument will take precedence. - -The easiest way to send a message: - -.. code-block:: python - - smtp.send_email( - "sender@email.com", - "recipient@email.com", - "The Subject", - "This is the text body of the email" - ) - -The current version also supports sending html emails and emails with -attachments. - -.. code-block:: python - - smtp.send_email( - "sender@email.com", - "recipient@email.com", - "An html email with attachments", - "This is the text body of the email", - html="

This is the html part of the email

", - files=['file1.txt', 'file2.txt'] - ) - -*** -API -*** -.. autoclass:: parsons.SMTP - :inherited-members: - - - diff --git a/docs/p2a.rst b/docs/p2a.rst index 700cf0f36c..b1f8dd7d00 100644 --- a/docs/p2a.rst +++ b/docs/p2a.rst @@ -5,6 +5,46 @@ Phone2Action Overview ******** -This is a deprecated class only maintained here for backwards compatibility. Phone2action has been renamed to `Capitol Canary `_ and the Parson object has similarly been renamed. +`Phone2Action `_ is a digital advocacy tool used by progressive organizations. This class +allows you to interact with the tool by leveraging their `API `_. -To access the documentation see the `Capitol Canary page `_ +.. note:: + Authentication + You will need to email Phone2Action to request credentials to access the API. The credentials consist of an app ID and an app key. + +*********** +Quick Start +*********** + +To instantiate the ``Phone2Action`` class, you can either pass in the app ID and app key as arguments or set the +``PHONE2ACTION_APP_ID`` and ``PHONE2ACTION_APP_KEY`` environmental variables. + +.. code-block:: python + + from parsons import Phone2Action + + # Instantiate the class using environment variables + p2a = Phone2Action() + + # Get all advocates updated in the last day + import datetime + today = datetime.datetime.utcnow() + yesterday = today - datetime.timedelta(days=1) + + # get_advocates returns a dictionary that maps the advocate data (e.g. phones) to a parsons + # Table with the data for each advocate + advocates_data = p2a.get_advocates(updated_since=yesterday) + + # For all of our advocates' phone numbers, opt them into SMS + for phone in advocates_data['phones']: + phone_number = phone['phones_address'] + # Only update phone numbers that aren't already subscribed + if phone['subscribed']: + p2a.update_advocate(phone['advocate_id'], phone=phone_number, sms_opt_in=True) + +*** +API +*** + +.. autoclass :: parsons.Phone2Action + :inherited-members: diff --git a/docs/requirements.txt b/docs/requirements.txt index e18e81c1ef..0e1b2b30b4 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -2,4 +2,3 @@ Sphinx==4.3.2 sphinx-rtd-theme==1.0.0 myst-parser==0.16.1 -sphinx-multiversion diff --git a/docs/scytl.rst b/docs/scytl.rst deleted file mode 100644 index 16a6ea2bea..0000000000 --- a/docs/scytl.rst +++ /dev/null @@ -1,43 +0,0 @@ -Scytl -========= - -******** -Overview -******** - -Scytl, or Clarity Elections, is a company that creates a tool for publishing election results in real-time. It's used in the U.S. by several states and over 800 counties, including Georgia, Colorado, Arkansas, and Dallas County, Texas. - -For each participating election administrator, they publish a site with results that can be published on election night. Unfortunately, while that site contains downloadable data, that data is formatted in a complex way, making it difficult for developers to fetch election results. In general, their results either come zipped in either an unformatted text file or a complex XML document. Summary results come as a zipped CSV, but just contain top-line results. The JSON that powers the site results is even more complicated. - -This connector provides methods to download the latest election results from their site and formats them into readable lists of dictionaries, which can easily be converted into a Parsons Table or Pandas dataframe. - -Because this connector is can be useful for live reporting, it also contains a polling feature. 
As long as the class is instantiated, it will only fetch results that are new since the previous fetch of that method. To skip this feature, set ``force_update`` to ``True`` on any of the fetch methods. - -.. note:: - Authentication - All endpoints for Scytl are public, and do not need authentication. - -********** -Quickstart -********** - -To get started, initialize a Scytl class with the two-letter state code, the election id, and the county name (optional). - -To get these details, go to the website for the given election, and look in the url. For example, if the url is "https://results.enr.clarityelections.com/TX/Dallas/114890/web.285569/", then the state is "TX", the county is "Dallas", and the election ID is "114890". If the url is "https://results.enr.clarityelections.com/GA/114729/web.285569/", the state is "GA" and the election ID is "114729". - -.. code-block:: python - - from parsons import Scytl - - scy = Scytl(state = 'GA', election_id = '114729') - - # Get detailed results by geography - scy.get_detailed_results() - - -************** -Scytl Class -************** - -.. autoclass :: parsons.Scytl - :inherited-members: diff --git a/docs/table.rst b/docs/table.rst index 126723e166..0f9b4348fc 100755 --- a/docs/table.rst +++ b/docs/table.rst @@ -33,16 +33,16 @@ From Parsons Table * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_sftp_csv` - SFTP Server - Write a table to a csv stored on an SFTP server - * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_redshift` + * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_redshift` - A Redshift Database - Write a table to a Redshift database - * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_postgres` + * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_postgres` - A Postgres Database - Write a table to a Postgres database * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_civis` - Civis Redshift Database - Write a table to Civis platform database - * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_petl` + * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_petl` - Petl table object - Convert a table to a Petl table object * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_json` @@ -53,17 +53,7 @@ From Parsons Table - Write a table to a local html file * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_dataframe` - Pandas Dataframe [1]_ - - Return a Pandas dataframe - * - :py:meth:`~parsons.etl.tofrom.ToFrom.append_csv` - - CSV file - - Appends table to an existing CSV - * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_zip_csv` - - ZIP file - - Writes a table to a CSV in a zip archive - * - :py:meth:`~parsons.etl.tofrom.ToFrom.to_dicts` - - Dicts - - Write a table as a list of dicts - + - Return a Pandas dataframe .. [1] Requires optional installation of Pandas package by running ``pip install pandas``. @@ -86,7 +76,7 @@ Create Parsons Table object using the following methods. * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_json` - File like object, local path, url, ftp. - Loads a json object into a Table - * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_columns` + * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_columns` - List object - Loads lists organized as columns in Table * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_redshift` @@ -101,9 +91,6 @@ Create Parsons Table object using the following methods. * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_s3_csv` - S3 CSV - Load a Parsons table from a csv file on S3 - * - :py:meth:`~parsons.etl.tofrom.ToFrom.from_csv_string` - - File like object, local path, url, ftp. - - Load a CSV string into a Table ..
[2] Requires optional installation of Pandas package by running ``pip install pandas``. @@ -196,8 +183,6 @@ of commonly used methods. The full list can be found in the API section. - Divide tables into smaller tables based on row count * - :py:meth:`~parsons.etl.etl.ETL.remove_null_rows` - Removes rows with null values in specified columns - * - :py:meth:`~parsons.etl.etl.ETL.deduplicate` - - Removes duplicate rows based on optional key(s), and optionally sorts **Extraction and Reshaping** @@ -224,7 +209,7 @@ Parsons Table Indexing To access rows and columns of data within a Parsons table, you can index on them. To access a column pass in the column name as a string (e.g. ``tbl['a']``) and to access a row, pass in the row index as -an integer (e.g. ``tbl[1]``). +an integer (e.g. ``tbl[1]``). .. code-block:: python @@ -232,7 +217,7 @@ an integer (e.g. ``tbl[1]``). # Return a column as a list tbl['a'] - >> [1, 3] + >> [1, 3] # Return a row as a dict tbl[1] @@ -308,21 +293,8 @@ An example: tbl.to_redshift('main.name_table') This "lazy" loading can be very convenient and performant. However, it can make issues hard to debug. Eg. if your data transformations are time-consuming, you won't actually notice that performance hit until you try to use the data, potentially much later in your code. -There may also be cases where it's possible to get faster execution by caching a table, -especially in situations where a single table will be used as the base for several subsequent calculations. - -For these cases Parsons provides two utility functions to materialize a Table and all of its transformations. - -.. list-table:: - :widths: 25 50 - :header-rows: 1 - * - Method - - Description - * - :py:meth:`~parsons.etl.table.Table.materialize` - - Load all data from the Table into memory and apply any transformations - * - :py:meth:`~parsons.etl.table.Table.materialize_to_file` - - Load all data from the Table and apply any transformations, then save to a local temp file. +So just be aware of this behavior. ******** Examples @@ -361,9 +333,3 @@ The following methods allow you to manipulate the Parsons table data. .. autoclass:: parsons.etl.etl.ETL :inherited-members: - -********* -Materialize API -********* -.. autoclass:: parsons.etl.table.Table - :members: materialize, materialize_to_file \ No newline at end of file diff --git a/docs/targetsmart.rst b/docs/targetsmart.rst old mode 100644 new mode 100755 index 8c3796e0f2..f9956c98a9 --- a/docs/targetsmart.rst +++ b/docs/targetsmart.rst @@ -1,40 +1,104 @@ TargetSmart -=========== +============ +******** Overview ******** -`TargetSmart `_ provides access to voter and consumer data for the progressive community. +`TargetSmart `_ provides access to voter and consumer data for the progressive community. Currently, +there are two TargetSmart services that are supported by two Parsons classes, each requiring separate credentials: -TargetSmart Developer API -------------------------- +1. ``TargetSmartAPI``: `Single record lookup with HTTPS `_. This class provides methods to support searching for individual people, voters, and district information for a geographic location. +2. ``TargetSmartAutomation``: `Bulk record matching with SFTP `_. This class provides general methods for processing files instead of individual records. -Parsons provides methods to consume the data services provided by the -TargetSmart Developer API. These services include both low latency search and asynchronous list matching. +.. 
note:: + Authentication + Log in to `My TargetSmart `_ to access authentication credentials. You will need an API key + to use the ``TargetSmartAPI`` class, and an SFTP username and password to use the ``TargetSmartAutomation`` class. -* :doc:`Interacting with the TargetSmart Developer API <../targetsmart_api>` + For more information, see the `API documentation `_. -TargetSmart Automation Workflows -------------------------------- +.. warning:: + Returned fields + Returned fields are controlled by the TargetSmart staff. Please contact them if adjustments are needed. -Parsons provides methods for interacting with TargetSmart Automation Workflows, -a solution for executing custom file processing workflows programmatically. In -some cases, TargetSmart will provide custom list matching solutions using -Automation Workflows. + Endpoint Access + Access to endpoints is individually provisioned. If you encounter errors accessing an endpoint, please contact + your TargetSmart account representative to verify that your API key has been provisioned access. -* :doc:`Interacting with TargetSmart Automation Workflows <../targetsmart_automation_workflows>` +*********** +TargetSmart +*********** -.. note:: - **TargetSmart Developer API versus Automation** +========== +Quickstart +========== + +To instantiate ``TargetSmartAPI``, you can either store your API Key as the environmental variable +``TS_API_KEY``, or pass it in as an argument: + +.. code-block:: python + + from parsons import TargetSmartAPI + + # First approach: Store API key as an environmental variable + ts_api = TargetSmartAPI() + + # Second approach: Pass API key as an argument + ts_api = TargetSmartAPI(api_key='my_api_key') + +You can then call various endpoints: + +.. code-block:: python + + # Search for a person record using an email address + ts_api.data_enhance(search_id='test@email.com', search_id_type='email') + + # Search for district information using an address + ts_api.district(search_type='address', address='123 test st, Durham NC 27708') + +=== +API +=== + +.. autoclass :: parsons.TargetSmartAPI + :inherited-members: + +********** +Automation +********** + +========== +Quickstart +========== + +To instantiate ``TargetSmartAutomation``, you can either store your SFTP username and password +as the environmental variables ``TS_SFTP_USERNAME`` and ``TS_SFTP_PASSWORD``, or pass them in as +keyword arguments: + +.. code-block:: python + + from parsons import TargetSmartAutomation + + # First approach: Store SFTP username and password as environmental variables + ts_auto = TargetSmartAutomation() + + # Second approach: Pass SFTP username and password as arguments + ts_auto = TargetSmartAutomation(sftp_username='my_sftp_username', sftp_password='my_sftp_password') + +You can then call various methods: + +.. code-block:: python + + # Check the status of a match job + ts_auto.match_status(job_name='my_job_name') - Unless TargetSmart has provided a custom workflow solution for you, you can - ignore the Automation information. + # Remove all files for the match job + ts_auto.remove_files(job_name='my_job_name') - TargetSmart's Developer API provides an HTTP-based interface for consuming the - general web services that TargetSmart provides. The TargetSmart Automation - system solely provides a solution for consuming customized file processing - workflows that are provisioned for specific client needs. TargetSmart Automation - is based on SFTP instead of HTTP.
+=== +API +=== - - `TargetSmart Developer API docs on docs.targetsmart.com `_ - - `TargetSmart Automation docs on docs.targetsmart.com `_ +.. autoclass :: parsons.TargetSmartAutomation + :inherited-members: diff --git a/docs/targetsmart_api.rst b/docs/targetsmart_api.rst deleted file mode 100755 index 4bc1ceb196..0000000000 --- a/docs/targetsmart_api.rst +++ /dev/null @@ -1,75 +0,0 @@ -:orphan: - -TargetSmart Developer API -========================= - -`TargetSmart `_ provides access to voter and consumer data for the progressive community. - -Overview --------- - -The ``TargetSmartAPI`` class provides methods to consume the data services provided by the `TargetSmart Developer API `_. Parsons provides the following methods as convenient wrappers for interacting with the corresponding TargetSmart HTTP services: - -* ``data_enhance``: Quickly retrieve voter and consumer data enrichment fields from TargetSmart’s platform database for a previously identified individual. -* ``radius_search``: Search for individuals within a user specified geographic area defined by a radius centered around a latitude/longitude point. -* ``phone``: Enrich a list of phone numbers with TargetSmart data -* ``district``: Retrieve political district data using one of several lookup options -* ``voter_registration_check``: Search TargetSmart’s service database of registered voters, to check if a voter is registered at a given address. -* ``smartmatch``: Match CSV file records to TargetSmart's service database of voting age individuals. Multiple matching strategies are applied to find accurate matches and return enriched data. Read more about `SmartMatch `_, TargetSmart's list matching solution. - -Some TargetSmart API services have not yet been implemented in Parsons. For more information, see the `API documentation `_. - - -Authentication -.............. - -Log in to `My TargetSmart `_ to access authentication credentials. You will need an API key to use the ``TargetSmartAPI`` class. - -.. note:: - Endpoint Access - Access to endpoints is individually provisioned. If you encounter errors accessing an endpoint, please contact `TargetSmart Client Services `_ to verify that your API key has been provisioned access. - - -Data Enrichment -............... - -Most TargetSmart API services append a set of enrichment data fields as part of -a matching or search request. The presence of these fields are provisioned by -the TargetSmart Client Services team. Please contact `TargetSmart Client -Services `_ to learn more or request -adjustments. - - -Quickstart -========== - -To instantiate ``TargetSmartAPI``, you can either store your API Key as the environmental variable -``TS_API_KEY``, or pass it in as an argument: - -.. code-block:: python - - from parsons import TargetSmartAPI - - # First approach: Store API key as an environmental variable - ts_api = TargetSmartAPI() - - # Second approach: Pass API key as an argument - ts_api = TargetSmartAPI(api_key='my_api_key') - -You can then call various methods that correspond to TargetSmart endpoints: - -.. code-block:: python - - # Search for a person record using an email address - ts_api.data_enhance(search_id='test@email.com', search_id_type='email') - - # Search for district information using an address - ts_api.district(search_type='address', address='123 test st, Durham NC 27708') - - -API -=== - -.. 
autoclass :: parsons.TargetSmartAPI - :inherited-members: - diff --git a/docs/targetsmart_automation_workflows.rst b/docs/targetsmart_automation_workflows.rst deleted file mode 100644 index 675f49ed8b..0000000000 --- a/docs/targetsmart_automation_workflows.rst +++ /dev/null @@ -1,77 +0,0 @@ -:orphan: - -TargetSmart Automation Workflows -================================ - -In addition to the :doc:`TargetSmart Developer API <../targetsmart_api>`, -TargetSmart also provides a solution for executing custom data processing -workflows that TargetSmart implements for specific client needs. The -``TargetSmartAutomation`` class can be used to execute these workflows for -common purposes such as customized list matching workflows. Workflow execution -can take minutes to hours depending on the workflow type, size of data, and -queuing. - -.. note:: - Authentication - TargetSmart Automation workflows use SFTP. You will need to obtain SFTP credentials from TargetSmart to utilize the ``TargetSmartAutomation`` class. - -Quickstart -========== - -To instantiate ``TargetSmartAutomation``, you can either store your SFTP username and password -as the environmental variables ``TS_SFTP_USERNAME`` and ``TS_SFTP_PASSWORD``, or pass them in as -keyword arguments: - -.. code-block:: python - - from parsons import TargetSmartAutomation - - # First approach: Store SFTP username and password as environmental variables - ts_auto = TargetSmartAutomation() - - # Second approach: Pass SFTP username and password as arguments. These are - # provided by TargetSmart for your account. Warning: Do not store password - # literals in your source code. - ts_auto = TargetSmartAutomation( - sftp_username='my_sftp_username', sftp_password='my_sftp_password' - ) - - -You can then call these methods: - -.. code-block:: python - - # Execute a custom workflow that TargetSmart has provisioned for you. This - # blocks until completion and may take minutes/hours depending on the data size - # and workflow type - - input_table = Table.from_csv('my_file_to_match.csv') - output_table = ts_auto.match( - input_table, - 'workflow_name_provided_by_targetsmart', - 'my_job_name', - emails=['bob@example.com'], - ) - - # Most Automation workflows perform list matching, but not all. The ``execute`` - # method is an alias for the ``match`` method to avoid confusion in these cases. - # This is equivalent to the above - output_table = ts_auto.execute( - input_table, - 'workflow_name_provided_by_targetsmart', - 'my_job_name', - emails=['bob@example.com'], - ) - - # Optionally check the status of a workflow execution - ts_auto.match_status(job_name='my_job_name') - - # Optionally remove all files for the match job. TargetSmart's lifecycle rules - # will eventually remove them if not. - ts_auto.remove_files(job_name='my_job_name') -API -=== -.. autoclass :: parsons.TargetSmartAutomation - :inherited-members: diff --git a/docs/training_guides/etl_best_practices.rst deleted file mode 100644 index 3562e71f10..0000000000 --- a/docs/training_guides/etl_best_practices.rst +++ /dev/null @@ -1,558 +0,0 @@ -================================== -Introduction to ETL Best Practices -================================== - -This training guide will walk you through some basic ETL workflows using Parsons. It is based on a two-part training designed by Cormac Martinez del Rio and Shauna Gordon-McKeon. - -It introduces the basic concepts behind the Extract, Transform and Load (ETL) process by working through two examples.
First, we focus on how to write a basic Parsons script that moves data between one platform (Mobilize) and another (Google Sheets). Then, we introduce some more advanced concepts such as data warehouses, platforms like Civis, and the use of log tables and schedulers to make your workflow easier to run and debug. - -.. contents:: Table of Contents - :depth: 3 - -You can suggest improvements to this guide or request additional guides by filing an issue in our issue tracker or telling us in Slack. To get added to our Slack, and/or to request access to the recordings of this training, email us at *engineering@movementcooperative.org*. - -******** -Part One -******** - -^^^^^^^^^^^^ -Introduction -^^^^^^^^^^^^ - -Have you ever tried to get data from one platform or system to another? Doing so can be quite a pain, especially if you have to manually download and upload individual files. Luckily, websites and apps often have APIs, short for application programming interfaces. These are gateways that let us move data into and out of a system using code. - -**ETL**, which stands for Extract, Transform, and Load, is the process by which we extract data from one system's API, transform that data so that it's compatible with a second system's API, and then load that data into that second system. - -The format in which a source system gives us data is often different from the format in which another system likes to receive data! Everyone thinks their way is best. -Not so good for the movement, good for data engineers' job security. ;) - -Parsons can help us every step of the way. We can use Parsons to extract, transform, and load data! - -Today we're going to be using a Parsons script to move data from Mobilize to Google Sheets. Our inspiration was an experience Shauna had managing canvassing volunteers who had signed up on Mobilize, but whose canvassing status was being tracked via Google Sheets. - -The example script can be found in full `on Github `_. - -If you need help getting set up with Parsons so that you can run this script, check out our `getting started training guide `_. - -Okay, let's proceed! - -^^^^^^^^^^^^^^ -Authentication -^^^^^^^^^^^^^^ - -In order to access our data from Mobilize and add it to Google Sheets, we need to authenticate ourselves to these two services. We do this by getting the relevant credentials from the platform and then saving them to specific environmental variables. - -Each Parsons connector has a page in the documentation, and at the top of each page is a description of what credentials you need and how to get them. Sometimes this is straightforward, and sometimes it's more complicated. - -######## -Mobilize -######## - -To access Mobilize, you'll need to get an API key by contacting a support representative. If you don't have an account but would like to follow along anyway, we've provided some fake Mobilize data which we'll walk you through accessing below. - -If you were able to get an API key, you can now save it as the environmental variable ``MOBILIZE_AMERICA_API_KEY`` by running this command on the command line:: - - set MOBILIZE_AMERICA_API_KEY=$API_KEY # Windows - export MOBILIZE_AMERICA_API_KEY=$API_KEY # Linux/Mac - -(Not comfortable with the command line? Check out our `training guide `_.) - -And that's it, you're done! When you instantiate the Mobilize connector, it will look in the environment for ``MOBILIZE_AMERICA_API_KEY``. If it finds the key, it can use it to handle all the authentication for you. - -..
note:: - - What do we mean, "when you instantiate the Mobilize connector"? We've created the Mobilize connector class, which has general features anyone can use to work with Mobilize. But in order to actually work with that class, you need to create an "instance" of it. That instance will have data specific to you, such as your API key. - - "Instantiation" is just a fancy way to say "create an instance of". In Python, you instantiate something by calling it with parentheses, ie: ``mobilize_instance = Mobilize()``. - -############# -Google Sheets -############# - -Setting up the Google Sheets connector takes several steps. - -First, you'll need to go to the `Google Developers Console `_ and select the project you want to work with, or create a new one (recommended). Following `these instructions from Google `_, click **APIs & Auth** and then **APIs**. Select the Drive API from among the API options, and click **enable**. - -Once you've created a project and enabled the API, you'll need to get the credentials that will allow you to access the API. Click on the **credentials** option in the left sidebar. Click **create credentials** and select the **Service Account** option. Once you have filled out the form and clicked submit, it will give you a set of credentials as a json string which you can save to a file. - -Now we need to tell Parsons where it can find the credentials. We'll set an environmental variable ``GOOGLE_DRIVE_CREDENTIALS`` which is the path to where your credentials are stored (replace the paths below with your correct paths):: - - set GOOGLE_DRIVE_CREDENTIALS="C:\Home\Projects\" # Windows - export GOOGLE_DRIVE_CREDENTIALS="/home/projects/" # Linux/Mac - - -Learn more about paths :ref:`here `. - -Finally, look inside the credentials file for an email address in the field ``client_email``. It will look something like ``service-account@projectname-123456.iam.gserviceaccount.com``. Go to the Google Drive UI for the folder you want to work with and share the folder with this email address. - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Extracting Data from Mobilize -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -################################# -Setting up: Imports and Instances -################################# - -Before we jump into moving data around, let's import all the things we need and instantiate our connectors. - -Your imports should look like this:: - - import json - from datetime import datetime - from parsons import Table, MobilizeAmerica, GoogleSheets - -`json `_ is a Python module that helps us convert between data in a JSON format (which is a popular way to store and share data) and Python data structures. - -`datetime `_ is a Python module that helps us work more easily with dates and times. - -Finally, from Parsons, we're importing the two connectors we're using, plus the Parsons Table object. The Parsons Table is the core data structure in Parsons. It's a standardized way to hold data, which makes it very easy to move data between vendors even if the vendors have different structures. - -We instantiate our connectors with this code:: - - mobilize = MobilizeAmerica() - google_sheets = GoogleSheets() - -And we're ready to start extracting! - -########## -Extracting -########## - -We're going to extract some data on attendance from Mobilize.
We can do that with this code:: - - attendance_records = mobilize.get_attendances() - -If you weren't able to get an authenticated Mobilize account, you can use the fake Mobilize data in `this google sheet `_:: - - spreadsheet_id = "1YZr6gXmptxfzqb_t58frwNHhVu_KMTQzvMpnNUZd47I" - attendance_records = google_sheets.get_worksheet(spreadsheet_id) - -And...that's it! We've got our data. Let's take a look at what we've extracted:: - - print(attendance_records) - -The result should look like this:: - - {'id': '46273', 'event_id': '454545', 'event_title': 'January Canvass', 'timeslot_id': '738375', 'timeslot_start_date': '1642865400', 'timeslot_end_date': '1642872600', 'status': 'REGISTERED', 'attended': 'true', 'person': '{"id": 1, "given_name": "Lou", "family_name": "Slainey", "email_address": "lslainey0@unicef.org", "phone_number": "3271326753", "postal_code": "78737"}'} - {'id': '46274', 'event_id': '454546', 'event_title': 'January Textbank', 'timeslot_id': '239573', 'timeslot_start_date': '1643563800', 'timeslot_end_date': '1643527800', 'status': 'REGISTERED', 'attended': 'true', 'person': '{"id": 2, "given_name": "Arleyne", "family_name": "Ransfield", "email_address": "aransfield1@qq.com", "phone_number": "2174386332", "postal_code": "78737"}'} - {'id': '46275', 'event_id': '454547', 'event_title': 'February Canvass', 'timeslot_id': '183743', 'timeslot_start_date': '1644939000', 'timeslot_end_date': '1644946200', 'status': 'REGISTERED', 'attended': 'true', 'person': '{"id": 3, "given_name": "Alameda", "family_name": "Blackmuir", "email_address": "ablackmuir2@wisc.edu", "phone_number": "3844977654", "postal_code": "78737"}'} - {'id': '46276', 'event_id': '454548', 'event_title': 'February Phonebank', 'timeslot_id': '283666', 'timeslot_start_date': '1645378200', 'timeslot_end_date': '1645342200', 'status': 'REGISTERED', 'attended': 'true', 'person': '{"id": 4, "given_name": "Bondie", "family_name": "Berrow", "email_address": "bberrow3@discuz.net", "phone_number": "2275080414", "postal_code": "78737"}'} - {'id': '46277', 'event_id': '454549', 'event_title': 'March Relational Organizing Hour', 'timeslot_id': '477483', 'timeslot_start_date': '1648218600', 'timeslot_end_date': '1648225800', 'status': 'REGISTERED', 'attended': 'true', 'person': '{"id": 5, "given_name": "Korrie", "family_name": "Spight", "email_address": "kspight4@sakura.ne.jp", "phone_number": "9818241063", "postal_code": "78737"}'} - ... - -There are more than five rows in our table, but ``print`` only displays the first five rows by default, for readability's sake. - -As you can see, this data corresponds to what's in the Google sheet. We display the data in a Python dictionary, with the column names as keys and the actual contents of each cell as the values. You can ask for any row of a Parsons Table as a dictionary:: - - print(attendance_records[0]) - >> {'id': '46273', 'event_id': '454545', 'event_title': 'January Canvass', 'timeslot_id': '738375', 'timeslot_start_date': '1642865400', 'timeslot_end_date': '1642872600', 'status': 'REGISTERED', 'attended': 'true', 'person': '{"id": 1, "given_name": "Lou", "family_name": "Slainey", "email_address": "lslainey0@unicef.org", "phone_number": "3271326753", "postal_code": "78737"}'} - -You can also get any column of a Parsons Table as a list of values:: - - print(attendance_records["event_title"]) - >> ['January Canvass', 'January Textbank', 'February Canvass', 'February Phonebank', 'March Relational Organizing Hour' ... 
- -Because individual rows are treated as dictionaries, and individual columns as lists, that makes it easy to iterate over them with a for loop:: - - for index, attendance in enumerate(attendance_records): - print(attendance['person']) - -There are also a couple of convenience methods for getting the total number of rows and the list of column names:: - - attendance_records.num_rows - attendance_records.columns - -No matter where you got your data from, these methods should always work! That's the benefit of using a standardized format like a Parsons Table. - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Transforming Data with Parsons -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -#################### -Fixing Dates + Times -#################### - -Let's make some fixes to our data. First off, those timeslot fields are confusing! What kind of date is ``1642865400``? - -(It's actually something called a `unix timestamp `_, which measures the total number of seconds since January 1st, 1970. Why January 1st, 1970? No real reason! They just needed to pick a date and I guess that seemed like a good one.) - -Let's convert these unix timestamps to something more readable. To do this, we define a function that takes in a value and returns a value:: - - def convert_to_legible_date(unix_date): - return datetime.utcfromtimestamp(int(unix_date)).strftime('%Y-%m-%d %H:%M:%S') - -Here, we're using the ``datetime`` library mentioned above. The ``strftime`` method is what determines the new format. For example, ``%Y`` means "Year with century as a decimal number" (like, say, 1970), and ``%m`` means "Month as a zero-padded decimal number" (like, say, 01). Here's a `cheatsheet `_ in case you want to play around with the formatting. - -Once we've got our function, we can apply it to all the rows in a column by using the Parsons Table's ``convert_column`` function:: - - attendance_records.convert_column('timeslot_start_date', convert_to_legible_date) - -Notice how the first parameter passed to the method names the column to be converted, while the second parameter is the function to be applied to each row in the column. The original value of the cell will be passed into the function, and whatever is returned will be the new value of the cell. - -################## -Unpacking a Column -################## - -Currently in our table, each person's contact info is crammed into a single column, formatted as a JSON string. That's a bummer!:: - - 'person': '{"id": 1, "given_name": "Lou", "family_name": "Slainey", "email_address": "lslainey0@unicef.org", "phone_number": "3271326753", "postal_code": "78737"}' - -We can turn these fields into their own columns in two steps. - -First, we're going to convert that column from a json string to a Python dictionary. As long as the string is formatted correctly, the only thing we need to do is pass in the ``json.loads`` method:: - - attendance_records.convert_column('person', json.loads) - -Then we can use a special Parsons method, ``unpack_dict``, to turn the keys of a dictionary into multiple columns!:: - - attendance_records.unpack_dict('person', prepend=False) - -########################### -Aggregating Data Using PETL -########################### - -Parsons tables are built on top of PETL tables. `PETL `_ is a general purpose Python package for data science similar to `pandas `_. - -Because Parsons tables are built on PETL tables, you can use any PETL function on a Parsons Table.
Just convert your Parsons table to a PETL table with the ``.table`` attribute:: - - petl_table = attendance_records.table - -One example of a useful PETL function is ``aggregate()``, which allows you to summarize data across rows. For instance, the following code gets the total number of signups by event:: - - sign_ups_by_event_petl = petl_table.aggregate('event_title', len) - -We can then convert the result back into a Parsons Table, if needed:: - - sign_ups_by_event_parsons = Table(sign_ups_by_event_petl) - -############## -Selecting Rows -############## - -One last transformation! Let's use the ``select_rows`` function to separate the event attendances by the month that they happened:: - - jan_attendances = attendance_records.select_rows("'2022-01' in {timeslot_start_date}") - feb_attendances = attendance_records.select_rows("'2022-02' in {timeslot_start_date}") - mar_attendances = attendance_records.select_rows("'2022-03' in {timeslot_start_date}") - -Note that this only works if we successfully transformed ``timeslot_start_date`` above! - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Loading Data to Google Sheets -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let's go ahead and create a new spreadsheet to load data into. We'll put it in a folder that already exists. To get the folder ID below, look in the URL. The folder ID is the long string of letters and numbers, like so:: - - folder_id = "1y1jgygK5YUQLVrgRgNw7A8Hf2ppqOJJZ" # get from URL - -We also need to give our new spreadsheet a name:: - - spreadsheet_name = "Volunteer Attendance Records" - -We can use these two variables with the ``create_spreadsheet`` command, and save the sheet_id for later use:: - - sheet_id = google_sheets.create_spreadsheet(spreadsheet_name, folder_id=folder_id) - -The ``overwrite_sheet`` method overwrites an existing sheet with data:: - - google_sheets.overwrite_sheet(sheet_id, jan_attendances) - google_sheets.overwrite_sheet(sheet_id, feb_attendances) - -If you run both commands, you should only see the February attendances, because they'll have overwritten the January ones. But maybe you don't want to do that. Maybe you want to append all the data. You can do that too:: - - google_sheets.overwrite_sheet(sheet_id, jan_attendances) - google_sheets.append_to_sheet(sheet_id, feb_attendances) - google_sheets.append_to_sheet(sheet_id, mar_attendances) - -Note how the first command overwrites the sheet, starting us fresh, but the other two use ``append_to_sheet``. - -You can also format cells using the ``format_cells`` method:: - - red = {"red": 1.0, "green": 0.0, "blue": 0.0} - google_sheets.format_cells(sheet_id, "A1", {"backgroundColor": red}, worksheet=0) - -Formatting a random cell red is a bit silly though. Let's try a more interesting example. We're going to overwrite our attendance records, just to make sure we're working from a fresh start. Then we'll go through the records one by one and, if the person didn't attend, we'll make their background red:: - - google_sheets.overwrite_sheet(sheet_id, attendance_records) # overwrite sheet - - for index, row in enumerate(attendance_records): - adjusted_index = index + 2 # accounts for python zero-indexing and header row - if row["attended"] == "false": - cell_range = f"A{adjusted_index}:N{adjusted_index}" - google_sheets.format_cells(sheet_id, cell_range, {"backgroundColor": red}, worksheet=0) - -The Parsons Google Sheets connector only exposes a few very common functions directly. Everything else we'll need to use the underlying client for.
If you use a client function a lot, feel free to suggest to us that we add it to the Parsons connector directly! That will make it easier for you and others to use. - -.. note:: - - What is a client? A client is a tool that makes it easier to access APIs by handling all the details of making `HTTP requests `_. - - Many big software companies, such as Google, maintain clients in various languages to encourage people to use their APIs. We use `Google's Python client `_, which means we have access to all the cool features that Google developers have added to that client. - - Many smaller software companies, including most progressive organizations, do not have enough resources to maintain clients. For those connectors, we use `our own simple client `_ to make requests. It does not have any additional connector-specific features. - - You can access the client on a connector, whatever kind it is, with the method ``client``, ie ``mobilize.client``. (Sometimes, like in the case of Google Sheets, the client has a different, custom name such as ``google_sheets.gspread_client``. We're trying to make everything consistent but we haven't quite managed it yet, alas!) - -Let's just re-write the code above to show you what it would look like if we were using the client to do it:: - - google_sheets.overwrite_sheet(sheet_id, attendance_records) # overwrite sheet - worksheet = google_sheets.gspread_client.open(spreadsheet_name).sheet1 # get client's worksheet object - - for index, row in enumerate(attendance_records): - adjusted_index = index + 2 # accounts for python zero-indexing and header row - if row["attended"] == "false": - cell_range = f"A{adjusted_index}:N{adjusted_index}" - worksheet.format(cell_range, {"backgroundColor": red}) - -As you can see, the code is pretty similar. The only difference is that we use ``gspread_client`` to directly call a client method (``open``) and then work with the object that the client returns (``worksheet``) when formatting the cells. - -******** -Part Two -******** - -^^^^^^^^^^^^^^^^^^^^^^ -Using a Data Warehouse -^^^^^^^^^^^^^^^^^^^^^^ - -We've gone over how to write a script that takes data from one place, transforms it, and then moves it to another. But many people find it helpful to store their data in a centralized location. This can be desirable for a few different reasons: - -* Using a data warehouse can make it easier to look at your data all together and to track changes to it -* Most warehouses let you query data with SQL queries, which many people find easier or more familiar -* Warehouses are often optimized for dealing with very large data sources, which is helpful if you're using large data sets. - -In other words, it's convenient to extract data from your source system and load it into your data warehouse. From there, you can do some data transformations in SQL to prepare the data for the destination system, and then push the data to your destination system. - -Some examples of data warehouses are BigQuery, Snowflake, and Redshift. Low cost solutions could be Google Sheets (maybe using Google Data Studio as a reporting tool). - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -New Example: Mobilize ➡ Civis/Redshift ➡ Action Network -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -For the second half of this training, we're going to be focused on a new use case. We'll be trying to move data from Mobilize to Civis/Redshift to Action Network.
If you don't have a Civis account, you won't be able to follow along with this part of the guide at home, so we've included a lot of screenshots. :) - -The Mobilize to Action Network sync is something we'd want to run every day on an automated basis. There are various tools that can help automate syncs like ours. We're using Civis, but we could also use Fivetran, Airflow, or cron jobs. If you'd like a guide that goes through using a different tool, please request one! - -.. image:: ../_static/images/civis_etl_workflow.png - -What we're looking at here is a Civis workflow for our sync. You can see in the schedule box to the right that the workflow is set up to run daily at 1am. - -The three steps of our ETL pipeline are displayed under the big letters E, T and L below: - -* The first thing that happens is Mobilize data is imported to our data warehouse. That takes care of the E of ETL. -* In the second part of the workflow, we prepare the data for Action Network by writing a SQL query. That's the T of ETL. -* In the final step of the workflow, a python script loads the data prepared by the SQL script into Action Network. That's the L. - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Step 1: Extracting Data Into the Warehouse -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Tools like Civis often have no-code solutions for getting data from your source system into your data warehouse. That makes our jobs quite a bit easier! This screenshot shows the interface for importing data from Mobilize using Civis: - -.. image:: ../_static/images/civis_mobilize_import.png - -If that's not an option, because Civis doesn't have an importer for your tool or for some other reason, you can write a custom Python script which extracts data from the source system. You can use Parsons for this:: - - - from parsons import Table, MobilizeAmerica, Redshift - - mobilize = MobilizeAmerica() - rs = Redshift() - - attendances = mobilize.get_attendances() - rs.copy(attendances, 'mobilize.attendances', if_exists='drop', alter_table=True) - -The ``rs.copy`` method used here loads data into the Redshift database you're connected to. The ``mobilize.attendances`` parameter specifies which table to copy the data to. The ``copy`` method can also be used with the BigQuery connector. - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Step 2: Transforming Data in Warehouse with SQL -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -With our previous script, we transformed data using Python, but you may be more comfortable using SQL. When you're using a data warehouse like Civis, you can run a SQL query or two (or more!) during the transformation step. - -.. code-block:: sql - - CREATE TABLE mobilize_schema.mobilize_users_to_sync AS ( - - SELECT DISTINCT - user_id as mobilizeid - , given_name - , family_name - , email_address - , phone_number - , postal_code - FROM mobilize_schema.mobilize_participations as mob - -- Joining the log table lets us know which records have been synced - -- and which records still need to be synced - LEFT JOIN cormac_scratch.mobilize_to_actionnetwork_log as log - on log.mobilizeid = mob.user_id - WHERE log.synced is null - - ); - - -This script creates a table where each row is a unique Mobilize user that needs to be synced to Action Network. It creates this table from the participations table by using the SQL ``DISTINCT`` keyword.
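If you'd rather kick off this transformation from Python instead of a Civis SQL script, here is a minimal sketch using the Parsons ``Redshift`` connector (an illustration, not part of the original workflow; it assumes the standard ``REDSHIFT_*`` environmental variables are set and reuses the schema, table, and column names from the query above)::

    from parsons import Redshift

    rs = Redshift()  # reads the REDSHIFT_* environmental variables

    # Run the transformation inside the warehouse. A CREATE TABLE statement
    # returns no rows, so there is no result to capture here.
    rs.query("""
        CREATE TABLE mobilize_schema.mobilize_users_to_sync AS (
            SELECT DISTINCT
                user_id as mobilizeid
                , given_name
                , family_name
                , email_address
                , phone_number
                , postal_code
            FROM mobilize_schema.mobilize_participations as mob
            LEFT JOIN cormac_scratch.mobilize_to_actionnetwork_log as log
                ON log.mobilizeid = mob.user_id
            WHERE log.synced is null
        );
    """)

Running the SQL from Python like this can be handy if you want to keep all three ETL steps in a single scheduled script.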
- -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Step 3: Load Data from Warehouse to Action Network -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The final step is to move data from the warehouse to Action Network. You can use `this script `_ to follow along if you have a Civis account. - -Before we dive into the script, let's go over a few key concepts: log tables and logging. - -#################### -Log Tables & Logging -#################### - -Log Tables and logging are two distinct things, but they serve the same general purpose: helping us to track what's happening to our data, which is especially useful when something goes wrong. - -Log Tables are tables in our database where we store information about our attempts to sync records. When we're saving data to log tables, it looks like this:: - - log_record = { - 'mobilizeid': mobilize_user['mobilizeid'], - 'actionnetworkid': actionnetworkid, - 'synced': True, - 'errors': None, - 'date': str(datetime.now()) - } - - # Add the record of our success to the history books - loglist.append(log_record) - -The logging package, conversely, is a standard part of Python. Logs are usually saved as strings and saved to a single file or printed to standard output. It's for less formal analyses, like being able to check "hey where's my code at". When we're saving data via the logging package, it looks like this:: - - logger.info('Starting the sync now.') - -########################### -Stepping Through the Script -########################### - -We start by pulling our Mobilize data out of the Redshift table where it's been stored, and logging (informally) that we've done so:: - - sql_query = 'select * from mobilize_schema.mobilize_users_to_sync limit 5;' - new_mobilize_users = my_rs_warehouse.query(sql_query) - - logger.info(f'''There are {new_mobilize_users.num_rows} new mobilize users that need to be synced to - Action Network.''') - - if new_mobilize_users.num_rows > 0: - logger.info('Starting the sync now.') - -We can now iterate through each of our new mobilize users. For each Mobilize user, we're going to try and sync them to Action Network. If that doesn't work, we'll log the errors. We'll do this using what's known as a try-except statement in Python:: - - for mobilize_user in new_mobilize_users: - - try: - - # try this code - - except Exception as e: - - # if we get an error, do this instead - -.. warning:: - - Pythonistas refer to handling an exception as "catching" it. It is considered bad practice to catch a "bare" (generic) Exception. You should instead try to be as specific as possible. Ask yourself: what kind of errors am I expecting? For instance, here we might expect database errors and want to handle them without crashing the script, but we might not expect errors in our Python syntax. We probably still want our code to break if we make a typo, so that we can find and fix the typo! - - If you know that you're okay with, say, ValueErrors, you can write a try-except like this:: - - try: - # stuff - except ValueError as error: - # other stuff - - This try-except catches and handles only ValueErrors. All other errors will be "thrown" instead of "caught", which will halt/crash the script. - - -Let's take a look inside the try statement. What are we trying to do?
:: - - actionnetwork_user = my_actionnetwork_group.add_person( - email_address=mobilize_user['email_address'], - given_name=mobilize_user['given_name'], - family_name=mobilize_user['family_name'], - mobile_number=mobilize_user['phone_number'], - tag='Mobilize Event Attendee', - postal_addresses=[ - { - 'postal_code': mobilize_user['postal_code'] - } - ] - ) - - # Get Action Network ID - identifiers = actionnetwork_user['identifiers'] - actionnetworkid = [entry_id.split(':')[1] - for entry_id in identifiers if 'action_network:' in entry_id][0] - - # Create a record of our great success - log_record = { - 'mobilizeid': mobilize_user['mobilizeid'], - 'actionnetworkid': actionnetworkid, - 'synced': True, - 'errors': None, - 'date': str(datetime.now()) - } - - # Add the record of our success to the history books - loglist.append(log_record) - -We get the data from each ``mobilize_user`` in our Parsons Table and send that data to Action Network via the ``add_person`` method. (There's a little bit of fancy formatting done to send the ``postal_addresses`` info. You can figure out if data needs special formatting by checking out the connector's docs. For instance, the docs for ``add_person`` can be found `here `_.) - -Action Network sends back information about the user. We do another bit of fancy formatting work to extract the Action Network ID. - -If we got all the way to this point in the script without breaking on an error, then our sync was a success! We can save it as a ``log_record`` in our ``log_list`` to be stored in the database later. - -Now let's look inside the except statement. What happens if things go wrong?:: - - logger.info(f'''Error for mobilize user {mobilize_user['mobilizeid']}. - Error: {str(e)}''') - - # Create a record of our failures - log_record = { - 'mobilizeid': mobilize_user['mobilizeid'], - 'actionnetworkid': None, - 'synced': False, - 'errors': str(e)[:999], - 'date': str(datetime.now()) - } - - # Add the record of our greatest failures to the history books - loglist.append(log_record) - -If things go wrong, we log that information for later. Note that line ``str(e)[:999]``. That's us getting information about the error out of the error object, ``e``. - -Finally, once we've looped through all our Mobilize users, we're ready to save our log tables to the database:: - - if new_mobilize_users.num_rows > 0: - logtable = Table(loglist) - errors_count = logtable.select_rows("{synced} is False").num_rows - success_count = logtable.select_rows("{synced} is True").num_rows - - logger.info(f'''Successfully synced {success_count} mobilize users and failed to sync {errors_count}''') - - my_rs_warehouse.copy(tbl=logtable, table_name='mobilize_schema.mobilize_to_actionnetwork_log', if_exists='append', alter_table=True) - -Note that our log records can be turned into a Parsons Table just like any other kind of data! And note that we're again using ``copy`` to copy data into our database. - -And that's it! - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Scheduling Jobs With Container Scripts -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Different platforms allow you to schedule jobs in different ways. Civis lets you schedule jobs using container scripts. - -A Civis container script runs your Python code on a remote server for you. Under the hood, Civis takes your Python script from where it is stored in GitHub and runs it in a Docker environment on a remote server. - -`GitHub `_ is the Google Docs of coding, an online service for collaborating with a team as you write scripts.
It's where we maintain `Parsons `_ itself. - -`Docker `_ is a service that lets you create a remote environment that includes all of the Python packages your script needs to run. TMC maintains a `Parsons docker image `_ that you can use - or that you can tell Civis to use! - -Put all these pieces together and you get a virtual computer with Parsons pre-installed where you can run the specified script. Civis orchestrates this, and also allows you to pass parameters into the script - for example, the API keys for Action Network or Redshift. - -Let's look at the Civis container script for this project: - -.. image:: ../_static/images/civis_container_script.png - -You can see where we're specifying the Docker Image, the Github repository where you can find our script, and the command to run our script. - -This container script can now be scheduled using the Civis scheduling interface. \ No newline at end of file diff --git a/docs/training_guides/getting_set_up.rst b/docs/training_guides/getting_set_up.rst deleted file mode 100644 index 237762726b..0000000000 --- a/docs/training_guides/getting_set_up.rst +++ /dev/null @@ -1,178 +0,0 @@ -=========================== -Getting Set Up With Parsons -=========================== - -This training guide will walk you through setting up Parsons on your computer. It provides in-depth explanations of each of the tools we recommend you use, including the command line, virtual environments, and git/version control. No prior experience should be necessary to follow this guide. - -You can suggest improvements to this guide or request additional guides by filing an issue in our issue tracker or telling us in Slack. To get added to our Slack, email us at *engineering@movementcooperative.org*. - -**************************************** -Step 1: Open Up a Command Line Interface -**************************************** - -Command line interfaces let you do a lot of different things on your computer, including installing and running programs and navigating the directory structure of your computer. - -On Macs/Linux the default command line interface is called a **Terminal** and on Windows it is called the **Command Prompt**. Command line interfaces are also sometimes called *shells*. Look for this program on your computer and open it up. - -The commands you can use in the command line differ somewhat depending on whether you're using Mac/Linux or Windows. - -**Mac/Linux**: - -* You can use ``pwd`` (“print working directory”) to figure out where you currently are. -* To move around, use ``cd`` (for example ``cd ..`` means "go up one directory" or ``cd my_folder`` which means "go into my_folder"). -* Use ``ls`` to list all the files and folders in your current directory. -* A `Mac/Linux command line cheat sheet `_ can help you keep track of which commands are which. - -**Windows**: - -* You can use ``cd`` to figure out where you currently are. -* To move around, use ``cd`` (for example ``cd ..`` means "go up one directory" or ``cd my_folder`` which means "go into my_folder"). -* Use ``dir`` to list all the files and folders in a directory. -* A `Windows command line cheat sheet `_ can help you keep track of which commands are which. - -You do not have to type everything on the command line out by hand. You can auto-complete the names of files/folders in your current directory by tapping the tab key. On Mac/Linux you can also tab-complete installed programs. And you can access previous commands via the up and down arrows. Save your hands! Learn these tricks.
- -*************************************** -Step 2: Set Up Your Virtual Environment -*************************************** - -Normally, tools like `pip `_ install Python libraries directly to your system. When your Python programs run, they look for the libraries they depend upon in your system. But this can cause problems when different programs need different versions of the same library. - -To handle this issue, we recommend you use virtual environments to install Parsons. Virtual environments allow you to install libraries into an isolated environment specific to a project. That way you can use different versions of the same libraries for different projects. - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Mac/Linux Virtual Environments -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Before getting started, check which version of Python you’re running by typing ``python --version`` in your command line. Python 3.4+ includes a virtual environment manager called `venv `_. If your version is lower than Python 3.4, you'll have to install a virtual environment manager like `virtualenv `_ if you haven't already. You can do this by typing ``pip install virtualenv`` in the command line. - -Next, create a directory to store your virtual environments, for example at the path */home/your_name/virtualenvs*. (Not sure what a path is? see :ref:`Paths vs $PATHs`.) - -You can use the ``mkdir`` command to create a new directory, ie: ``mkdir /home/username/virtualenvs``. We'll refer to the full path to this directory as **$path_to_your_env** below. - -The next step is to create your virtual environment within this directory. The commands are different based on whether you're on Python 3.4+ and using venv, or whether you're using an older version with the virtualenv program you just installed. - -**If you’ve got Python 3.4 or higher**, type ``python -m venv $path_to_your_env/$your_env_name``. The path should be the directory you created to store the virtual environments, and the environment name is a new name chosen by you. - -**If you’ve got a lower Python version**, type ``virtualenv $path_to_your_env/$your_env_name``. Again, the path should be the directory for storing virtual environments, and the env name is a new name. - -Regardless of what version you're on, you can activate your virtual environment with the command: ``source $path_to_your_env/$your_env_name/bin/activate``. - -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Windows Virtual Environments -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Start by installing virtualenvwrappers from source:: - - git clone git://github.com/davidmarble/virtualenvwrapper-win.git - cd virtualenvwrapper-win - python setup.py install - -Not familiar with git? Read :ref:`our intro below`. - -Find the Scripts\ directory for your Python installation, such as ``C:\Users\\AppData\Local\Programs\Python\Python37\Scripts\``. - -Add the Scripts\ directory to your $PATH. (Not sure what a $PATH is? see :ref:`Paths vs $PATHs`.) - -To create a virtual environment for Parsons, execute: ``mkvirtualenv parsons`` - -To use this virtual environment, execute: ``workon parsons`` - -.. _path-explainer: - -^^^^^^^^^^^^^^^ -Paths vs $PATHs -^^^^^^^^^^^^^^^ - -Paths are how we refer to a file or program's location in the file system. For example, ``/home/janedoe/virtualenvs`` says that within the top-level directory ``home`` there is a directory named ``janedoe``, and within ``janedoe`` there is a directory ``virtualenvs``. 
- -``/home/janedoe/virtualenvs`` is an **absolute path** because it specifies exactly how to get there no matter where you are in the system. The path ``janedoe/virtualenvs`` is a **relative path** because it only works if you use it from the home directory. Trying to use a relative path from the wrong location is a common source of command line errors! - -On Windows, absolute paths look a little different. They start with the letter of the hard drive they're in, ie ``C:\Users\JaneDoe\Virtualenvs``. - -In these instructions we try to use absolute paths, even though they're a little wordier, because it's less likely to cause problems for you if you run them from an unexpected place. - -In addition to paths, there's an important environmental variable called **$PATH**. The $PATH is a list of absolute paths your computer will check when searching for installed libraries and scripts. You can check what's currently in your $PATH by typing ``echo $PATH`` (Mac/Linux) or ``echo %PATH%`` (Windows). - -When you activate your virtual environment, the path to the environment is placed as the first path. Paths are checked in order from first to last. You can check what packages have been installed in your virtualenv (and thus should be available on the path when the virtualenv is activated) by looking in ``lib/site-packages``. - -If you’re trying to run something you’ve installed, but your computer says it doesn’t exist, it may be because the computer doesn't have the right information in its $PATH. This happens to me all the time when I forget to activate my virtual environment! - -************************************ -Step 3: Download and Install Parsons -************************************ - -We're going to go over two different ways to download and install Parsons: using pip, and using git. Use pip if you just want to install Parsons and start using it. Use git if you might want to change Parsons to customize its behavior and/or contribute those changes back. - -^^^^^^^^^ -Using Pip -^^^^^^^^^ - -`Pip `_ is the Python package manager. Packages (also commonly known as “libraries”) are Python code that have been bundled up in a certain way (“packaged”) so they can be easily installed and used. - -By default, pip installs from the `Python Package Index or PyPI `_, but you can tell pip to install from a branch on Github or even from a folder on your machine. All you need is a package with the right files. The specifics of those files, and how to create your own package, is a `much more advanced topic `_. - -Essentially when you type ``pip install parsons[all]`` (or pip install anything!) you’re saying “Go find this project on PyPI and install it.” (Here’s `Parsons `_ on PyPI!) - -To install Parsons using pip, make sure your virtual environment is activated and type ``pip install parsons[all]``. It's that simple! - -.. _git-explainer: - -^^^^^^^^^ -Using Git -^^^^^^^^^ - -`Git `_ is a popular version control system used primarily by programmers. Many people use git by way of `Github `_, a company which provides free hosting (and other helpful features) for git repositories. Parsons, like many others, `hosts our code `_ on Github. - -Start by making sure git is installed on your computer. To do this, type ``git version`` at the command line. If it gives you a version number, great! You've got git installed. If you get an error message of some kind, you'll need to `install git `_. 
-
-Once you've installed git, you can execute the following commands::
-
-    git clone https://github.com/move-coop/parsons.git
-    cd parsons
-    pip install -r requirements.txt
-    python setup.py install
-
-These commands say, in order:
-
-* make a copy of the Parsons repository on my computer
-* change directories so I'm now in the top level of that repository
-* install all the libraries listed in the file ``requirements.txt``
-* see the file in this directory named ``setup.py``? run it to install this package
-
-You should now have a copy of Parsons installed locally and ready to use!
-
-.. note::
-
-    When you install Parsons from git, you're getting the most up-to-date version of Parsons there is. When you install Parsons from PyPI via pip, you might get a slightly older version, since we have to take the extra step of making a "release" to move changes from Github to PyPI. We make releases fairly frequently, so this shouldn't be an issue, but it's something to keep in mind if Parsons is behaving unexpectedly.
-
-$$$$$$$$$$
-Git Basics
-$$$$$$$$$$
-
-Giving you a full tour of git is beyond the scope of this tutorial, but here's a quick intro.
-
-Git allows you to connect the work you're doing locally with a central shared repository. When you enter a command like ``git clone https://github.com/move-coop/parsons.git``, git creates a copy of the repository on your local computer. It also keeps track of the source of your repository by listing it as a **remote**. Git's default name for remotes is **origin**.
-
-You can see all the remotes for a repository by typing the following command when within the repository: ``git remote -v``. (The -v stands for "verbose".) The result should look something like this::
-
-    origin  https://github.com/move-coop/parsons.git (fetch)
-    origin  https://github.com/move-coop/parsons.git (push)
-
-*Wait*, you might be asking, *what's this 'fetch' and 'push' business?* **Fetch** is the command you use to get changes from a remote. **Push** is the command you use to send changes to a remote. Although the locations you fetch/pull from and push to can be different, practically speaking they're almost always the same.
-
-To get the most recent version of a remote, use the command ``git fetch origin main``. That means "get any changes from the branch named main on the remote named origin, but don't apply them yet". You apply what you've fetched with the command ``git merge origin/main``. Many people combine these two steps with the single command ``git pull origin main``.
-
-Let's say you've made some local changes you want to send back to the remote. You can add your changes to the list of things to be committed with the command ``git add $filename``. You have to add at least one file, but you can add as many as you want. If you get confused about what's available to be added, the command ``git status`` will show you what's changed in your repository since the last commit, and whether or not it's been added with ``git add`` yet.
-
-Once you're done adding, bundle everything together with the command ``git commit -m "message"``. Use the "message" to briefly summarize your changes. Once you have added and committed your code, you can send it back to the remote with the command ``git push origin main``.
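-
-Putting those commands together, a typical round trip looks like this (the filename and commit message here are hypothetical)::
-
-    git status                           # see what's changed since the last commit
-    git add README.md                    # stage one file; repeat for as many as you need
-    git commit -m "Fix typo in README"   # bundle the staged changes with a summary
-    git push origin main                 # send the new commit to the remote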
-
-Pushing might be a bit more complicated than that, depending on who else has pushed to the origin while you were working, or whether you're pushing to a codebase like Parsons that requires you to submit changes via Pull Requests, but that's enough for now.
-
-Interested in learning more? Try `this tutorial `_ or reach out on Slack to request a mentor or more advanced training.
-
-**********
-Conclusion
-**********
-
-You should now have Parsons installed on your computer, and hopefully you're also more comfortable with the command line, virtual environments, paths, and git.
diff --git a/docs/use_cases/civis_job_status_slack_alert.rst b/docs/use_cases/civis_job_status_slack_alert.rst
deleted file mode 100644
index 2f7bd94fba..0000000000
--- a/docs/use_cases/civis_job_status_slack_alert.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-============================
-Civis Job Status Slack Alert
-============================
-
-The Movement Cooperative uses Civis Platform to run the data syncs its members rely on.
-Civis users are able to monitor the status of these pipelines within the Civis interface, but not all stakeholders are Civis users, and if a problem occurs folks have to navigate to another system, such as Slack or TMC's support ticket system, to flag the issue, collaborate on troubleshooting, and get updates on the resolution.
-
-To make this process more efficient, promote transparency, and facilitate collaboration, the TMC Engineering team wrote a script using Parsons to post Civis data sync statuses to an internal Slack channel. The statuses include a green check mark emoji if a sync has run successfully that day, a red X if a sync has failed, a running person if a sync was running at the time the script checked its status, and a person shrugging emoji if the script was unable to determine the status of the sync.
-
-Data syncs in Civis can take a variety of forms. Some are single jobs, some are a specific type of job called an import, and others consist of multiple jobs chained together in a workflow. The Civis API treats these objects slightly differently, so the script accounts for that by parsing out each type from the API response and combining them into one Parsons Table with a column for ``object_type``.
-
-At the request of Indivisible, the TMC Data & Technology team templatized this script to allow for creating customized versions for member organizations with only the specific syncs they depend on. Indivisible now has its own daily alert on the status of its data syncs and other important scripts populating in a private Slack channel.
-
-The code we used is available as a `sample script `_ for you to view, re-use, and/or customize.
diff --git a/docs/use_cases/mysql_to_googlesheets.rst b/docs/use_cases/mysql_to_googlesheets.rst
deleted file mode 100644
index c2e2c32ffc..0000000000
--- a/docs/use_cases/mysql_to_googlesheets.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-=============================
-MySQL to Google Sheets Export
-=============================
-
-ActionKit, a commonly used tool in the progressive ecosystem, provides its clients with access to their data in a MySQL database, which is very useful for data practitioners who write SQL but less so for users who do not write code. 350.org needed a way to track progress toward their annual key performance indicators (KPIs) that relied on ActionKit data but was accessible to many staff across the organization. Together with the Movement Cooperative (TMC), they were able to create a solution using the Parsons MySQL Database and Google Sheets classes.
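-
-In outline, a solution like theirs can be sketched as follows. This is a minimal, hypothetical example: it assumes the MySQL and Google credentials are supplied via environment variables, and the query and spreadsheet title are placeholders.
-
-.. code-block:: python
-
-    from parsons import MySQL, GoogleSheets
-
-    mysql = MySQL()          # picks up MySQL credentials from environment variables
-    sheets = GoogleSheets()  # picks up Google service account credentials
-
-    # Hypothetical query against ActionKit's MySQL database
-    tbl = mysql.query("SELECT COUNT(*) AS total_actions FROM core_action")
-
-    # Write the results to a new Google Sheets workbook
-    spreadsheet_id = sheets.create_spreadsheet("KPI Tracker")
-    sheets.overwrite_sheet(spreadsheet_id, tbl)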
-
-350.org and TMC developed a script to query the MySQL database where 350's ActionKit data lives and export the query results to a Google Sheet. One important function within the script is the try_overwrite function, which sets a maximum number of times the script will attempt to write data to a Google Sheet before erroring. This function is designed to help navigate Google's limits on the number of calls per minute.
-
-The original script contained four different queries and saved each query's results in a different tab in the newly created Google Sheets workbook. A simplified version of the code we used is available as a `sample script `_ for you to view, re-use, or customize to fit your needs.
diff --git a/docs/use_cases/opt_outs_to_everyaction.rst b/docs/use_cases/opt_outs_to_everyaction.rst
deleted file mode 100644
index 1c0c268554..0000000000
--- a/docs/use_cases/opt_outs_to_everyaction.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-=======================
-Opt-outs to EveryAction
-=======================
-
-As carriers tighten restrictions on peer-to-peer texting through 10DLC rules, it becomes more and more crucial for organizations to avoid texting people who have opted out of their communications. This is a challenge for organizations that pull outreach lists from EveryAction or VAN but run their actual texting program in another tool, because their opt-outs are tracked in a different system from the one where they pull their lists. A number of commonly used texting tools have integrations with EveryAction and VAN, but they don't always sync opt-out dispositions back into EveryAction/VAN.
-
-The Movement Cooperative worked with a couple of different member organizations to create a script using Parsons that would opt out phone numbers in EveryAction to prevent them from being pulled into future outreach lists. The script only updates existing records; it does not create new ones, so it requires you to provide a VAN ID and assumes that the people you want to opt out already exist in EveryAction.
-
-The script requires the user to provide a table containing columns for phone number (must be named ``phone``), committee ID (must be named ``committeeid``), and VAN ID (must be named ``vanid``).
-
-Some questions to consider when you construct this table are:
-
-- Which committees do you want to opt people out in?
-- Multiple people can have the same phone number assigned to them in EveryAction. Do you want to opt out a phone regardless of who it's associated with, or do you want to attempt to identify the specific person who opted out in your texting tool?
-- People can have multiple phone numbers associated with them in EveryAction. Do you want to opt out just the specific phone number that shows up in the texting tool data, or all phones associated with a given person?
-
-The code we used is available as a `sample script `_ for you to view, re-use, or customize to fit your needs.
diff --git a/docs/zoom.rst b/docs/zoom.rst
index 6944b2dbfa..ea49171b43 100644
--- a/docs/zoom.rst
+++ b/docs/zoom.rst
@@ -7,23 +7,21 @@ Overview
 
 `Zoom `_ is a video conferencing platform. This connector supports
 fetching users, fetching meetings, fetching metadata for past meetings, and fetching
-participants of past meetings via the `Zoom API `_.
+participants of past meetings via the `Zoom API `_.
 
 .. note::
   Authentication
 
-  The ``Zoom`` class uses server-to-server `OAuth `
-  to authenticate queries to the Zoom API. You must create a server-to-server application in
-  `Zoom's app marketplace ` to obtain an
-  ``account_id``, ``client_id``, and ``client_secret`` key. You will use this OAuth application to define your scopes,
-  which gives your ``Zoom`` connector read permission on endpoints of your choosing (`meetings`, `webinars`, etc.)
+  The ``Zoom`` class supports `JSON Web Token Authentication `_.
+  You must `Create a JWT App `_ to obtain
+  an API Key and API Secret for authentication.
 
 ***********
 Quick Start
 ***********
 
-To instantiate the ``Zoom`` class, you can either store your Zoom account ID, client ID, and client secret
-as environment variables (``ZOOM_ACCOUNT_ID``, ``ZOOM_CLIENT_ID``, ``ZOOM_CLIENT_SECRET``)
-or pass them in as arguments.
+To instantiate the ``Zoom`` class, you can either store your Zoom API
+key and secret as environment variables (``ZOOM_API_KEY`` and ``ZOOM_API_SECRET``,
+respectively) or pass them in as arguments:
 
 .. code-block:: python
 
@@ -34,11 +32,7 @@ or pass them in as arguments.
   zoom = Zoom()
 
   # If providing authentication credentials via arguments
-  zoom = Zoom(
-    account_id="my_account_id",
-    client_id="my_client_id",
-    client_secret="my_client_secret"
-  )
+  zoom = Zoom(api_key='my_api_key', api_secret='my_api_secret')
 
   # Get a table of host's meetings via their email or user id
   meetings_tbl = zoom.get_meetings('my_name@mail.com')
diff --git a/output.csv b/output.csv
new file mode 100644
index 0000000000..03ee2ca78f
--- /dev/null
+++ b/output.csv
@@ -0,0 +1,2113 @@
+ContactsContactID,VanID,ResultID,CanvassedBy,CommitteeID,CreatedBy,DateCreated,DateCanvassed,InputTypeID,ContactTypeID,ChangeTypeId,ErrorMessage
+1880011,101553642,1,2196065,92476,2196065,10/8/2021 10:50:00 PM,10/8/2021 6:42:00 PM,14,2,1,
+1880012,101548547,14,2194893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1,
[... the remaining 2,110 rows of identically formatted canvass-result data are omitted here ...]
PM,10/8/2021 7:11:00 PM,14,2,1, +1880588,101552384,14,2196545,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:31:00 PM,14,2,1, +1880589,101548626,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1880590,101549153,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1880591,101552080,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:48:00 PM,14,2,1, +1880592,101552118,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:41:00 PM,14,2,1, +1880593,101552130,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:16:00 PM,14,2,1, +1880594,101552187,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:17:00 PM,14,2,1, +1880595,101548402,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1880596,101548413,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:12:00 PM,14,2,1, +1880597,101548497,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:31:00 PM,14,2,1, +1880598,101548565,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:16:00 PM,14,2,1, +1880599,101548568,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1880600,101548571,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1880601,101548582,40,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1880602,101548628,3,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1880603,101548633,3,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1880604,101548668,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1880605,101548673,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1880606,101548717,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1880607,101548795,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:13:00 PM,14,2,1, +1880608,101548800,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:02:00 PM,14,2,1, +1880609,101548987,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:48:00 PM,14,2,1, +1880610,101549047,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:30:00 PM,14,2,1, +1880611,101549177,3,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880612,101549195,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:40:00 PM,14,2,1, +1880613,101549235,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:09:00 PM,14,2,1, +1880614,101549243,14,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1880615,101549272,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:30:00 PM,14,2,1, +1880616,101549401,40,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1880617,101549408,2,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:29:00 PM,14,2,1, +1880618,101552203,3,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:10:00 PM,14,2,1, +1880619,101552224,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:08:00 PM,14,2,1, +1880620,101552243,23,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:03:00 PM,14,2,1, +1880621,101552291,31,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:26:00 PM,14,2,1, +1880622,101552352,3,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1880623,101552381,10,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1880624,101552382,1,2196052,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, 
+1880625,101218878,3,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1880626,101218903,3,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1880627,101219332,3,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1880628,101219342,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1880629,101219601,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:28:00 PM,14,2,1, +1880630,101220435,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1880631,101220708,2,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:47:00 PM,14,2,1, +1880632,101220988,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1880633,101221229,3,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:13:00 PM,14,2,1, +1880634,101221375,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880635,101221409,10,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:17:00 PM,14,2,1, +1880636,101221463,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1880637,101221541,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:48:00 PM,14,2,1, +1880638,101221567,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:45:00 PM,14,2,1, +1880639,101221905,3,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1880640,101222612,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:32:00 PM,14,2,1, +1880641,101223324,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:47:00 PM,14,2,1, +1880642,101223423,3,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1880643,101224224,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:26:00 PM,14,2,1, +1880644,101224869,3,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:04:00 PM,14,2,1, +1880645,101225068,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1880646,101225072,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:17:00 PM,14,2,1, +1880647,101225142,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:38:00 PM,14,2,1, +1880648,101225666,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:26:00 PM,14,2,1, +1880649,101225671,1,2194397,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:30:00 PM,14,2,1, +1880650,101458535,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:12:00 PM,14,2,1, +1880651,101458774,2,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:48:00 PM,14,2,1, +1880652,101458838,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:47:00 PM,14,2,1, +1880653,101459282,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1880654,101460217,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1880655,101460289,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1880656,101460466,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1880657,101460484,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:55:00 PM,14,2,1, +1880658,101460501,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1880659,101460539,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1880660,101460670,23,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1880661,101460764,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880662,101461129,1,2197476,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 9:39:00 PM,14,2,1, +1880663,101461424,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1880664,101461539,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:35:00 PM,14,2,1, +1880665,101461647,40,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:26:00 PM,14,2,1, +1880666,101461997,2,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:03:00 PM,14,2,1, +1880667,101462671,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1880668,101463279,31,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:42:00 PM,14,2,1, +1880669,101463441,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1880670,101463954,2,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1880671,101464158,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:43:00 PM,14,2,1, +1880672,101464243,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1880673,101464620,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1880674,101465060,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:28:00 PM,14,2,1, +1880675,101465638,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1880676,101466084,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1880677,101466115,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:35:00 PM,14,2,1, +1880678,101466999,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:04:00 PM,14,2,1, +1880679,101467069,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:27:00 PM,14,2,1, +1880680,101467368,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1880681,101468701,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:16:00 PM,14,2,1, +1880682,101468988,23,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1880683,101469634,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1880684,101469815,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:49:00 PM,14,2,1, +1880685,101469862,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1880686,101469997,23,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:25:00 PM,14,2,1, +1880687,101470100,3,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:24:00 PM,14,2,1, +1880688,101470226,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:46:00 PM,14,2,1, +1880689,101471254,1,2197476,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:45:00 PM,14,2,1, +1880690,101535523,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:31:00 PM,14,2,1, +1880691,101535569,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1880692,101535623,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1880693,101535695,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:03:00 PM,14,2,1, +1880694,101535882,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:23:00 PM,14,2,1, +1880695,101535960,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:38:00 PM,14,2,1, +1880696,101536153,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1880697,101537058,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1880698,101537329,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:03:00 PM,14,2,1, +1880699,101537362,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:03:00 PM,14,2,1, 
+1880700,101537617,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1880701,101537787,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:51:00 PM,14,2,1, +1880702,101537947,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1880703,101538267,14,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:46:00 PM,14,2,1, +1880704,101538303,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880705,101538671,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1880706,101538884,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:34:00 PM,14,2,1, +1880707,101538926,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1880708,101538938,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:36:00 PM,14,2,1, +1880709,101539060,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:03:00 PM,14,2,1, +1880710,101539426,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1880711,101539752,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1880712,101540034,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1880713,101540257,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1880714,101540456,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1880715,101540619,14,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:37:00 PM,14,2,1, +1880716,101540664,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880717,101540807,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1880718,101541011,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:42:00 PM,14,2,1, +1880719,101541089,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1880720,101541121,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:59:00 PM,14,2,1, +1880721,101541180,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1880722,101541269,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1880723,101541305,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:03:00 PM,14,2,1, +1880724,101541327,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:12:00 PM,14,2,1, +1880725,101541517,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:23:00 PM,14,2,1, +1880726,101541762,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:10:00 PM,14,2,1, +1880727,101541902,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:51:00 PM,14,2,1, +1880728,101541947,3,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:35:00 PM,14,2,1, +1880729,101542335,1,2196979,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1880730,101477694,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1880731,101477707,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:27:00 PM,14,2,1, +1880732,101478159,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880733,101478253,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:24:00 PM,14,2,1, +1880734,101479055,3,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1880735,101479181,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1880736,101479678,31,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1880737,101479973,3,2194389,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 9:31:00 PM,14,2,1, +1880738,101479978,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:56:00 PM,14,2,1, +1880739,101480036,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1880740,101481006,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:00:00 PM,14,2,1, +1880741,101481743,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880742,101481917,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1880743,101482138,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:24:00 PM,14,2,1, +1880744,101482513,3,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:10:00 PM,14,2,1, +1880745,101483037,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1880746,101483253,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1880747,101483814,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1880748,101484037,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:23:00 PM,14,2,1, +1880749,101484594,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1880750,101484924,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1880751,101485053,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:55:00 PM,14,2,1, +1880752,101485367,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1880753,101485506,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1880754,101485550,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1880755,101485644,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1880756,101485650,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:45:00 PM,14,2,1, +1880757,101485779,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1880758,101485923,3,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1880759,101486126,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1880760,101486723,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1880761,101486803,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:42:00 PM,14,2,1, +1880762,101487105,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:40:00 PM,14,2,1, +1880763,101487135,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:51:00 PM,14,2,1, +1880764,101487633,3,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1880765,101488080,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:12:00 PM,14,2,1, +1880766,101488141,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:43:00 PM,14,2,1, +1880767,101488207,10,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:43:00 PM,14,2,1, +1880768,101488530,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:48:00 PM,14,2,1, +1880769,101488530,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1880770,101488585,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:55:00 PM,14,2,1, +1880771,101489789,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:49:00 PM,14,2,1, +1880772,101490327,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1880773,101490438,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:53:00 PM,14,2,1, +1880774,101490907,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, 
+1880775,101491146,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1880776,101491631,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:25:00 PM,14,2,1, +1880777,101491978,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1880778,101492346,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:56:00 PM,14,2,1, +1880779,101492400,1,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1880780,101492530,23,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:51:00 PM,14,2,1, +1880781,101493138,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1880782,101493164,2,2194389,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1880783,101458535,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:12:00 PM,14,2,1, +1880784,101458774,2,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:48:00 PM,14,2,1, +1880785,101458838,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:47:00 PM,14,2,1, +1880786,101459282,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1880787,101460217,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1880788,101460289,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1880789,101460466,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1880790,101460484,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:55:00 PM,14,2,1, +1880791,101460501,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1880792,101460539,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1880793,101460670,23,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1880794,101460764,3,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1880795,101461129,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1880796,101461424,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:10:00 PM,14,2,1, +1880797,101461539,3,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:34:00 PM,14,2,1, +1880798,101461647,40,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:26:00 PM,14,2,1, +1880799,101461997,2,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:03:00 PM,14,2,1, +1880800,101462671,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1880801,101463279,31,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:42:00 PM,14,2,1, +1880802,101463441,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1880803,101463954,2,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1880804,101464158,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:43:00 PM,14,2,1, +1880805,101464243,3,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1880806,101464620,3,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1880807,101465060,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:28:00 PM,14,2,1, +1880808,101465396,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:01:00 PM,14,2,1, +1880809,101465638,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:45:00 PM,14,2,1, +1880810,101465764,2,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1880811,101466084,3,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1880812,101466115,3,2191385,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 8:35:00 PM,14,2,1, +1880813,101466404,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:40:00 PM,14,2,1, +1880814,101466999,3,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:04:00 PM,14,2,1, +1880815,101467069,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:27:00 PM,14,2,1, +1880816,101467368,2,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1880817,101468701,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:16:00 PM,14,2,1, +1880818,101468988,23,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1880819,101469634,3,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1880820,101469815,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:49:00 PM,14,2,1, +1880821,101469862,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1880822,101469997,23,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:24:00 PM,14,2,1, +1880823,101470100,23,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:24:00 PM,14,2,1, +1880824,101470226,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:46:00 PM,14,2,1, +1880825,101471254,1,2191385,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:44:00 PM,14,2,1, +1880826,101477694,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:27:00 PM,14,2,1, +1880827,101477707,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:09:00 PM,14,2,1, +1880828,101478159,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880829,101478253,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:24:00 PM,14,2,1, +1880830,101479055,3,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880831,101479181,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1880832,101479678,31,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1880833,101479973,3,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1880834,101479978,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:56:00 PM,14,2,1, +1880835,101480036,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1880836,101480855,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:32:00 PM,14,2,1, +1880837,101481006,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:00:00 PM,14,2,1, +1880838,101481917,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1880839,101482138,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:08:00 PM,14,2,1, +1880840,101482263,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:03:00 PM,14,2,1, +1880841,101482513,3,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1880842,101483037,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1880843,101483253,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:41:00 PM,14,2,1, +1880844,101483627,3,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1880845,101483814,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1880846,101484037,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:22:00 PM,14,2,1, +1880847,101484594,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1880848,101484671,3,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1880849,101484924,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, 
+1880850,101485053,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:55:00 PM,14,2,1, +1880851,101485367,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1880852,101485506,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1880853,101485550,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1880854,101485644,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1880855,101485650,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1880856,101485779,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1880857,101485923,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:49:00 PM,14,2,1, +1880858,101486126,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:48:00 PM,14,2,1, +1880859,101486723,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1880860,101486803,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1880861,101487105,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:44:00 PM,14,2,1, +1880862,101487135,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:51:00 PM,14,2,1, +1880863,101487504,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1880864,101487633,3,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1880865,101488080,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:12:00 PM,14,2,1, +1880866,101488141,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:43:00 PM,14,2,1, +1880867,101488207,10,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:43:00 PM,14,2,1, +1880868,101488530,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1880869,101488585,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:55:00 PM,14,2,1, +1880870,101489789,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1880871,101489978,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:46:00 PM,14,2,1, +1880872,101490327,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1880873,101490438,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:55:00 PM,14,2,1, +1880874,101490907,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:27:00 PM,14,2,1, +1880875,101491146,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1880876,101491631,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:09:00 PM,14,2,1, +1880877,101491978,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1880878,101492346,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1880879,101492400,1,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1880880,101492530,23,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1880881,101493138,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1880882,101493164,2,2194892,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1880883,101535434,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:58:00 PM,14,2,1, +1880884,101535448,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:12:00 PM,14,2,1, +1880885,101535579,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1880886,101535679,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1880887,101535755,1,2196564,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 8:41:00 PM,14,2,1, +1880888,101535817,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1880889,101536335,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1880890,101536358,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1880891,101537262,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1880892,101537272,10,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1880893,101537407,14,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:47:00 PM,14,2,1, +1880894,101537475,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:11:00 PM,14,2,1, +1880895,101537732,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:14:00 PM,14,2,1, +1880896,101537751,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1880897,101537912,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:14:00 PM,14,2,1, +1880898,101538267,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:56:00 PM,14,2,1, +1880899,101538312,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:31:00 PM,14,2,1, +1880900,101538656,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:03:00 PM,14,2,1, +1880901,101538769,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1880902,101539258,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1880903,101539333,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:38:00 PM,14,2,1, +1880904,101539352,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1880905,101539426,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1880906,101539432,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:51:00 PM,14,2,1, +1880907,101539461,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1880908,101539632,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1880909,101539685,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:03:00 PM,14,2,1, +1880910,101539800,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:31:00 PM,14,2,1, +1880911,101539923,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:02:00 PM,14,2,1, +1880912,101540011,10,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1880913,101540045,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:51:00 PM,14,2,1, +1880914,101540134,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:03:00 PM,14,2,1, +1880915,101540950,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1880916,101541001,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1880917,101541027,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:23:00 PM,14,2,1, +1880918,101541089,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1880919,101541271,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1880920,101541377,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:41:00 PM,14,2,1, +1880921,101541466,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:48:00 PM,14,2,1, +1880922,101541542,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:38:00 PM,14,2,1, +1880923,101541607,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1880924,101541705,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:04:00 PM,14,2,1, 
+1880925,101541842,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:23:00 PM,14,2,1, +1880926,101541999,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:13:00 PM,14,2,1, +1880927,101542019,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:40:00 PM,14,2,1, +1880928,101542107,3,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1880929,101542165,1,2196564,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1880930,101548411,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:21:00 PM,14,2,1, +1880931,101548428,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1880932,101548452,23,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:34:00 PM,14,2,1, +1880933,101548456,2,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1880934,101548458,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880935,101548491,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:35:00 PM,14,2,1, +1880936,101548518,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1880937,101548520,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:17:00 PM,14,2,1, +1880938,101548554,23,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:59:00 PM,14,2,1, +1880939,101548555,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880940,101548558,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1880941,101548581,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:51:00 PM,14,2,1, +1880942,101548618,31,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1880943,101548663,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880944,101548698,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:09:00 PM,14,2,1, +1880945,101548706,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880946,101548751,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1880947,101548755,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:17:00 PM,14,2,1, +1880948,101548773,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:04:00 PM,14,2,1, +1880949,101548802,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880950,101548807,2,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:42:00 PM,14,2,1, +1880951,101548827,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880952,101548936,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:56:00 PM,14,2,1, +1880953,101548947,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880954,101548948,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880955,101548967,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880956,101548984,2,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1880957,101549003,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:41:00 PM,14,2,1, +1880958,101549030,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1880959,101549032,2,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1880960,101549036,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:38:00 PM,14,2,1, +1880961,101549043,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880962,101549067,3,2196054,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 8:07:00 PM,14,2,1, +1880963,101549119,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:11:00 PM,14,2,1, +1880964,101549119,23,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:14:00 PM,14,2,1, +1880965,101549126,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880966,101549144,31,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:09:00 PM,14,2,1, +1880967,101549194,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:22:00 PM,14,2,1, +1880968,101549206,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880969,101549212,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:19:00 PM,14,2,1, +1880970,101549247,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:04:00 PM,14,2,1, +1880971,101549261,2,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1880972,101549397,2,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:36:00 PM,14,2,1, +1880973,101549402,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1880974,101549427,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880975,101551973,10,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:38:00 PM,14,2,1, +1880976,101551985,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1880977,101552014,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880978,101552089,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1880979,101552104,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880980,101552108,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:29:00 PM,14,2,1, +1880981,101552162,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:52:00 PM,14,2,1, +1880982,101552165,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1880983,101552189,10,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:39:00 PM,14,2,1, +1880984,101552208,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:26:00 PM,14,2,1, +1880985,101552213,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1880986,101552214,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:38:00 PM,14,2,1, +1880987,101552225,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1880988,101552269,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1880989,101552271,1,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:14:00 PM,14,2,1, +1880990,101552321,2,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1880991,101552341,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1880992,101552372,3,2196054,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1880993,101535419,2,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1880994,101535504,2,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1880995,101535505,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1880996,101535544,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1880997,101535769,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1880998,101535899,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1880999,101535972,2,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, 
+1881000,101536048,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1881001,101536102,3,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1881002,101536338,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1881003,101536557,3,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:09:00 PM,14,2,1, +1881004,101537163,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1881005,101537306,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1881006,101537320,2,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1881007,101537894,14,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1881008,101538320,2,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1881009,101538441,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:34:00 PM,14,2,1, +1881010,101538658,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:03:00 PM,14,2,1, +1881011,101538740,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:16:00 PM,14,2,1, +1881012,101539165,23,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1881013,101539253,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1881014,101539268,3,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1881015,101540326,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1881016,101540533,3,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1881017,101540590,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1881018,101540637,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1881019,101540663,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:16:00 PM,14,2,1, +1881020,101540926,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881021,101541072,3,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1881022,101541083,14,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1881023,101541306,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1881024,101541381,3,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1881025,101541472,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:51:00 PM,14,2,1, +1881026,101541828,1,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1881027,101542001,3,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881028,101542088,14,2188645,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:06:00 PM,14,2,1, +1881029,101477934,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881030,101478654,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:01:00 PM,14,2,1, +1881031,101478729,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:23:00 PM,14,2,1, +1881032,101478847,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1881033,101479034,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1881034,101479044,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:55:00 PM,14,2,1, +1881035,101479106,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1881036,101479124,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:51:00 PM,14,2,1, +1881037,101479192,14,2194904,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 8:25:00 PM,14,2,1, +1881038,101479598,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:09:00 PM,14,2,1, +1881039,101479766,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:16:00 PM,14,2,1, +1881040,101479906,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1881041,101479956,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:26:00 PM,14,2,1, +1881042,101480199,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881043,101480333,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:11:00 PM,14,2,1, +1881044,101480427,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881045,101480458,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881046,101480822,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1881047,101480911,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1881048,101482004,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1881049,101482385,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881050,101483891,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881051,101484153,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:17:00 PM,14,2,1, +1881052,101484478,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:27:00 PM,14,2,1, +1881053,101485707,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1881054,101485737,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1881055,101485851,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1881056,101485893,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:17:00 PM,14,2,1, +1881057,101485935,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1881058,101486121,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:10:00 PM,14,2,1, +1881059,101486219,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:25:00 PM,14,2,1, +1881060,101486872,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:23:00 PM,14,2,1, +1881061,101486972,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:18:00 PM,14,2,1, +1881062,101487019,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:29:00 PM,14,2,1, +1881063,101487162,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1881064,101487363,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881065,101487537,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:13:00 PM,14,2,1, +1881066,101487552,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:21:00 PM,14,2,1, +1881067,101487574,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1881068,101487779,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:50:00 PM,14,2,1, +1881069,101487848,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1881070,101487981,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1881071,101488225,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881072,101488649,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881073,101489869,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881074,101489896,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, 
+1881075,101490308,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:53:00 PM,14,2,1, +1881076,101490658,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1881077,101491130,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881078,101491263,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:56:00 PM,14,2,1, +1881079,101491320,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881080,101491914,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1881081,101492405,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:50:00 PM,14,2,1, +1881082,101492506,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:12:00 PM,14,2,1, +1881083,101492638,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:21:00 PM,14,2,1, +1881084,101493084,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1881085,101493277,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1881086,101493283,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:45:00 PM,14,2,1, +1881087,101493506,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:19:00 PM,14,2,1, +1881088,101493529,14,2194904,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881089,101475779,3,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1881090,101475930,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:34:00 PM,14,2,1, +1881091,101477720,3,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:36:00 PM,14,2,1, +1881092,101477855,2,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1881093,101477947,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881094,101477990,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:42:00 PM,14,2,1, +1881095,101478452,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881096,101478476,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881097,101478498,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881098,101478551,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881099,101478666,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881100,101478742,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881101,101479168,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:45:00 PM,14,2,1, +1881102,101479653,10,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881103,101479814,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881104,101479912,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881105,101480204,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:01:00 PM,14,2,1, +1881106,101480587,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:58:00 PM,14,2,1, +1881107,101480598,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1881108,101480704,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881109,101480922,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881110,101481345,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:11:00 PM,14,2,1, +1881111,101481463,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, 
+1881112,101481512,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881113,101481756,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1881114,101481820,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881115,101481962,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1881116,101482097,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1881117,101482251,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1881118,101483022,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881119,101483059,10,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881120,101483146,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881121,101483606,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881122,101483682,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881123,101483792,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881124,101483831,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881125,101483872,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881126,101483965,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1881127,101484131,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881128,101484182,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881129,101484278,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1881130,101484787,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1881131,101484803,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1881132,101485041,10,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:30:00 PM,14,2,1, +1881133,101485222,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1881134,101485332,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1881135,101485383,2,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881136,101485518,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1881137,101487796,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881138,101487906,3,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1881139,101488184,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1881140,101488273,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881141,101488285,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881142,101488604,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:02:00 PM,14,2,1, +1881143,101490001,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:10:00 PM,14,2,1, +1881144,101490394,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:55:00 PM,14,2,1, +1881145,101490588,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881146,101490724,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:43:00 PM,14,2,1, +1881147,101490901,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881148,101491125,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:53:00 PM,14,2,1, +1881149,101491196,1,2190907,92476,2169650,10/8/2021 
10:50:00 PM,10/8/2021 8:20:00 PM,14,2,1, +1881150,101491505,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:11:00 PM,14,2,1, +1881151,101491926,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1881152,101491938,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:55:00 PM,14,2,1, +1881153,101492494,40,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881154,101492575,1,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1881155,101493222,2,2190907,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1881156,101475809,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881157,101476028,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:10:00 PM,14,2,1, +1881158,101477744,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1881159,101477775,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:30:00 PM,14,2,1, +1881160,101478377,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:45:00 PM,14,2,1, +1881161,101478509,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881162,101478613,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881163,101479294,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:11:00 PM,14,2,1, +1881164,101479573,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881165,101479653,10,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:30:00 PM,14,2,1, +1881166,101479711,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1881167,101479745,31,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:17:00 PM,14,2,1, +1881168,101480075,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881169,101480080,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1881170,101480323,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1881171,101480734,3,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1881172,101480900,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1881173,101480984,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:38:00 PM,14,2,1, +1881174,101481198,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881175,101481210,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881176,101481457,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:41:00 PM,14,2,1, +1881177,101481576,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881178,101481803,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881179,101482519,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881180,101482526,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881181,101482630,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1881182,101482835,10,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1881183,101482872,3,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:09:00 PM,14,2,1, +1881184,101483059,10,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:30:00 PM,14,2,1, +1881185,101483098,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881186,101484170,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, 
+1881187,101484323,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1881188,101484363,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:29:00 PM,14,2,1, +1881189,101484689,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:19:00 PM,14,2,1, +1881190,101484827,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:53:00 PM,14,2,1, +1881191,101484960,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1881192,101485041,10,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881193,101485313,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881194,101485319,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1881195,101487729,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881196,101487753,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881197,101487838,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881198,101488291,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881199,101488407,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881200,101488610,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:04:00 PM,14,2,1, +1881201,101489771,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881202,101489831,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:56:00 PM,14,2,1, +1881203,101489841,3,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:58:00 PM,14,2,1, +1881204,101489880,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881205,101489925,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:11:00 PM,14,2,1, +1881206,101489971,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881207,101490263,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881208,101490333,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881209,101490599,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:02:00 PM,14,2,1, +1881210,101490786,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881211,101490797,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:14:00 PM,14,2,1, +1881212,101491185,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1881213,101491444,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1881214,101492096,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881215,101492102,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:09:00 PM,14,2,1, +1881216,101492216,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881217,101492362,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:37:00 PM,14,2,1, +1881218,101492367,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:42:00 PM,14,2,1, +1881219,101492563,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881220,101492723,31,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:09:00 PM,14,2,1, +1881221,101492769,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881222,101492782,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881223,101492794,2,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:15:00 PM,14,2,1, +1881224,101492928,40,2194888,92476,2169650,10/8/2021 
10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881225,101493095,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881226,101493198,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881227,101493307,40,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881228,101493331,1,2194888,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1881229,101218450,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1881230,101218520,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881231,101218668,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881232,101218769,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881233,101218810,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:32:00 PM,14,2,1, +1881234,101218942,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:22:00 PM,14,2,1, +1881235,101218980,31,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1881236,101219141,40,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:22:00 PM,14,2,1, +1881237,101219230,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1881238,101219236,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:53:00 PM,14,2,1, +1881239,101219325,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1881240,101219370,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1881241,101219370,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1881242,101219387,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881243,101219583,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:30:00 PM,14,2,1, +1881244,101220550,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:24:00 PM,14,2,1, +1881245,101220561,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1881246,101220645,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:43:00 PM,14,2,1, +1881247,101220971,40,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881248,101221064,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:38:00 PM,14,2,1, +1881249,101221169,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1881250,101221479,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:56:00 PM,14,2,1, +1881251,101221600,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:41:00 PM,14,2,1, +1881252,101221713,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1881253,101221758,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:17:00 PM,14,2,1, +1881254,101221825,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:26:00 PM,14,2,1, +1881255,101222001,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:16:00 PM,14,2,1, +1881256,101222016,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1881257,101222026,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1881258,101222066,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1881259,101222272,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1881260,101222292,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881261,101222491,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, 
+1881262,101222632,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:39:00 PM,14,2,1, +1881263,101222686,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1881264,101222696,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1881265,101222723,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1881266,101222816,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:05:00 PM,14,2,1, +1881267,101222827,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:16:00 PM,14,2,1, +1881268,101222934,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1881269,101222940,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:30:00 PM,14,2,1, +1881270,101223184,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:08:00 PM,14,2,1, +1881271,101223236,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:51:00 PM,14,2,1, +1881272,101223488,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:12:00 PM,14,2,1, +1881273,101223627,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:18:00 PM,14,2,1, +1881274,101223651,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1881275,101223760,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:43:00 PM,14,2,1, +1881276,101223782,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881277,101223940,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:45:00 PM,14,2,1, +1881278,101224240,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:10:00 PM,14,2,1, +1881279,101224308,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1881280,101224311,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881281,101224340,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881282,101224431,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:34:00 PM,14,2,1, +1881283,101224465,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1881284,101224536,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:42:00 PM,14,2,1, +1881285,101224623,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881286,101224745,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:56:00 PM,14,2,1, +1881287,101224828,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:07:00 PM,14,2,1, +1881288,101224833,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1881289,101225031,40,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:55:00 PM,14,2,1, +1881290,101225179,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881291,101225260,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1881292,101225302,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1881293,101225373,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:46:00 PM,14,2,1, +1881294,101225401,2,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1881295,101225457,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881296,101225627,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:47:00 PM,14,2,1, +1881297,101225635,1,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1881298,101225770,3,2190893,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881299,101548405,3,2194773,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 7:42:00 PM,14,2,1, +1881300,101548454,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1881301,101548492,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:40:00 PM,14,2,1, +1881302,101548525,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:44:00 PM,14,2,1, +1881303,101548533,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1881304,101548545,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:13:00 PM,14,2,1, +1881305,101548616,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1881306,101548661,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:55:00 PM,14,2,1, +1881307,101548671,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:47:00 PM,14,2,1, +1881308,101548690,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:25:00 PM,14,2,1, +1881309,101548707,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881310,101548715,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1881311,101548742,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:15:00 PM,14,2,1, +1881312,101548811,3,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881313,101548830,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1881314,101548853,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881315,101548882,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:24:00 PM,14,2,1, +1881316,101548941,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1881317,101548971,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:41:00 PM,14,2,1, +1881318,101548988,10,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:49:00 PM,14,2,1, +1881319,101549026,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1881320,101549031,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1881321,101549050,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1881322,101549063,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:14:00 PM,14,2,1, +1881323,101549142,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1881324,101549191,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1881325,101549234,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:24:00 PM,14,2,1, +1881326,101549246,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881327,101549262,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881328,101549271,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:34:00 PM,14,2,1, +1881329,101549276,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1881330,101549277,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:20:00 PM,14,2,1, +1881331,101549421,3,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:10:00 PM,14,2,1, +1881332,101551935,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:09:00 PM,14,2,1, +1881333,101551979,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1881334,101552017,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:28:00 PM,14,2,1, +1881335,101552125,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1881336,101552146,14,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:42:00 PM,14,2,1, 
+1881337,101552179,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1881338,101552185,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:20:00 PM,14,2,1, +1881339,101552234,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1881340,101552261,1,2194773,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1881341,101552407,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:19:00 PM,14,2,1, +1881342,101552644,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1881343,101552651,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:14:00 PM,14,2,1, +1881344,101552663,3,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1881345,101552749,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1881346,101552775,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:05:00 PM,14,2,1, +1881347,101552791,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:39:00 PM,14,2,1, +1881348,101552828,3,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1881349,101552843,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:51:00 PM,14,2,1, +1881350,101552856,3,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1881351,101552857,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:19:00 PM,14,2,1, +1881352,101552873,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1881353,101552931,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1881354,101552936,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:25:00 PM,14,2,1, +1881355,101553149,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1881356,101553189,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:15:00 PM,14,2,1, +1881357,101553196,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1881358,101553231,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:01:00 PM,14,2,1, +1881359,101553287,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1881360,101553292,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881361,101553394,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:21:00 PM,14,2,1, +1881362,101553410,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:04:00 PM,14,2,1, +1881363,101553421,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881364,101553438,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:44:00 PM,14,2,1, +1881365,101553463,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1881366,101553480,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:58:00 PM,14,2,1, +1881367,101553510,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:26:00 PM,14,2,1, +1881368,101553781,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1881369,101553823,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:34:00 PM,14,2,1, +1881370,101555074,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:59:00 PM,14,2,1, +1881371,101555124,40,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1881372,101555148,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:13:00 PM,14,2,1, +1881373,101555221,40,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1881374,101555302,1,2196065,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 9:07:00 PM,14,2,1, +1881375,101555371,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1881376,101555414,3,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1881377,101555567,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:22:00 PM,14,2,1, +1881378,101555624,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:28:00 PM,14,2,1, +1881379,101555658,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881380,101555701,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:31:00 PM,14,2,1, +1881381,101555722,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881382,101555726,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:21:00 PM,14,2,1, +1881383,101555730,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:24:00 PM,14,2,1, +1881384,101555766,3,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:35:00 PM,14,2,1, +1881385,101555778,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881386,101555784,2,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:31:00 PM,14,2,1, +1881387,101555831,1,2196065,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1881388,101218506,10,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881389,101218621,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1881390,101218637,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1881391,101218695,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:40:00 PM,14,2,1, +1881392,101218754,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:29:00 PM,14,2,1, +1881393,101219113,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:45:00 PM,14,2,1, +1881394,101219141,40,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:21:00 PM,14,2,1, +1881395,101219219,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1881396,101219236,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:53:00 PM,14,2,1, +1881397,101219288,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:16:00 PM,14,2,1, +1881398,101219304,2,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1881399,101219370,2,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1881400,101219387,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881401,101219440,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:17:00 PM,14,2,1, +1881402,101219583,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:32:00 PM,14,2,1, +1881403,101219622,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:16:00 PM,14,2,1, +1881404,101220141,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1881405,101220195,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:46:00 PM,14,2,1, +1881406,101220221,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:32:00 PM,14,2,1, +1881407,101220373,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1881408,101220678,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1881409,101220863,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881410,101220971,40,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881411,101221064,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:39:00 PM,14,2,1, 
+1881412,101221202,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:01:00 PM,14,2,1, +1881413,101221253,10,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1881414,101221484,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:51:00 PM,14,2,1, +1881415,101221518,2,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1881416,101221572,10,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1881417,101221577,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:43:00 PM,14,2,1, +1881418,101221583,2,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881419,101221734,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1881420,101221842,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:31:00 PM,14,2,1, +1881421,101221846,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:15:00 PM,14,2,1, +1881422,101221995,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1881423,101222001,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:17:00 PM,14,2,1, +1881424,101222026,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:09:00 PM,14,2,1, +1881425,101222066,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1881426,101222292,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881427,101222428,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1881428,101222491,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1881429,101222590,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881430,101222686,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1881431,101222718,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:52:00 PM,14,2,1, +1881432,101222723,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1881433,101222849,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:45:00 PM,14,2,1, +1881434,101222934,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1881435,101223037,2,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:48:00 PM,14,2,1, +1881436,101223184,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:08:00 PM,14,2,1, +1881437,101223471,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:34:00 PM,14,2,1, +1881438,101223540,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1881439,101223570,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881440,101223616,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1881441,101223674,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:39:00 PM,14,2,1, +1881442,101223695,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1881443,101223782,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:01:00 PM,14,2,1, +1881444,101223945,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:49:00 PM,14,2,1, +1881445,101224030,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881446,101224219,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1881447,101224308,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1881448,101224311,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:19:00 PM,14,2,1, +1881449,101224431,3,2180225,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 6:35:00 PM,14,2,1, +1881450,101224447,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:53:00 PM,14,2,1, +1881451,101224465,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1881452,101224536,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:42:00 PM,14,2,1, +1881453,101224812,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:55:00 PM,14,2,1, +1881454,101224828,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:07:00 PM,14,2,1, +1881455,101224833,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:28:00 PM,14,2,1, +1881456,101225010,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881457,101225148,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:42:00 PM,14,2,1, +1881458,101225165,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881459,101225179,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881460,101225373,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:46:00 PM,14,2,1, +1881461,101225457,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881462,101225535,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:14:00 PM,14,2,1, +1881463,101225556,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:30:00 PM,14,2,1, +1881464,101225627,1,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:47:00 PM,14,2,1, +1881465,101225704,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1881466,101225755,3,2180225,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1881467,101475746,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881468,101475942,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:09:00 PM,14,2,1, +1881469,101475966,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:11:00 PM,14,2,1, +1881470,101477910,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:20:00 PM,14,2,1, +1881471,101478229,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:25:00 PM,14,2,1, +1881472,101478539,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:04:00 PM,14,2,1, +1881473,101478630,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1881474,101478672,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:46:00 PM,14,2,1, +1881475,101478729,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:27:00 PM,14,2,1, +1881476,101479061,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:10:00 PM,14,2,1, +1881477,101479197,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881478,101479288,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:17:00 PM,14,2,1, +1881479,101479610,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1881480,101479623,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1881481,101480199,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881482,101480698,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881483,101481152,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881484,101481340,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:14:00 PM,14,2,1, +1881485,101481625,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:00:00 PM,14,2,1, +1881486,101482085,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:19:00 PM,14,2,1, 
+1881487,101482188,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:22:00 PM,14,2,1, +1881488,101482219,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:32:00 PM,14,2,1, +1881489,101482300,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881490,101482355,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881491,101482457,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:01:00 PM,14,2,1, +1881492,101483676,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881493,101483694,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881494,101483744,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:43:00 PM,14,2,1, +1881495,101483826,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:03:00 PM,14,2,1, +1881496,101483860,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:44:00 PM,14,2,1, +1881497,101483984,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881498,101484248,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1881499,101485573,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:45:00 PM,14,2,1, +1881500,101485669,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:17:00 PM,14,2,1, +1881501,101485761,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:21:00 PM,14,2,1, +1881502,101485851,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:30:00 PM,14,2,1, +1881503,101485947,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1881504,101485958,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1881505,101486029,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1881506,101486075,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1881507,101486104,14,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:10:00 PM,14,2,1, +1881508,101486155,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1881509,101486245,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:21:00 PM,14,2,1, +1881510,101486784,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:23:00 PM,14,2,1, +1881511,101486829,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1881512,101486844,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1881513,101486903,14,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:17:00 PM,14,2,1, +1881514,101486908,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:23:00 PM,14,2,1, +1881515,101487093,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:18:00 PM,14,2,1, +1881516,101487186,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1881517,101487196,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1881518,101487227,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:24:00 PM,14,2,1, +1881519,101487348,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1881520,101487354,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881521,101487568,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1881522,101487616,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1881523,101487671,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1881524,101487926,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 
8:02:00 PM,14,2,1, +1881525,101487963,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:15:00 PM,14,2,1, +1881526,101488128,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881527,101488267,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881528,101488469,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1881529,101490196,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881530,101490289,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881531,101490353,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1881532,101490694,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1881533,101490700,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1881534,101491283,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881535,101491320,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1881536,101492222,14,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881537,101492351,3,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:25:00 PM,14,2,1, +1881538,101492708,1,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881539,101493600,14,2188277,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:09:00 PM,14,2,1, +1881540,101552420,40,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1881541,101552526,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1881542,101552574,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881543,101552607,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:49:00 PM,14,2,1, +1881544,101552609,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:44:00 PM,14,2,1, +1881545,101552667,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:43:00 PM,14,2,1, +1881546,101552726,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:45:00 PM,14,2,1, +1881547,101552748,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:30:00 PM,14,2,1, +1881548,101552790,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:58:00 PM,14,2,1, +1881549,101552904,14,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1881550,101552948,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1881551,101552966,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:48:00 PM,14,2,1, +1881552,101552977,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1881553,101553013,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881554,101553038,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:13:00 PM,14,2,1, +1881555,101553052,40,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:41:00 PM,14,2,1, +1881556,101553056,3,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:58:00 PM,14,2,1, +1881557,101553067,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:46:00 PM,14,2,1, +1881558,101553081,3,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:58:00 PM,14,2,1, +1881559,101553099,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:12:00 PM,14,2,1, +1881560,101553159,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881561,101553178,14,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:04:00 PM,14,2,1, 
+1881562,101553227,14,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:41:00 PM,14,2,1, +1881563,101553256,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:01:00 PM,14,2,1, +1881564,101553281,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:47:00 PM,14,2,1, +1881565,101553359,2,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:53:00 PM,14,2,1, +1881566,101553385,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:03:00 PM,14,2,1, +1881567,101553417,14,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:17:00 PM,14,2,1, +1881568,101553419,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1881569,101553446,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:54:00 PM,14,2,1, +1881570,101553457,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1881571,101553509,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1881572,101553553,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881573,101553555,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1881574,101553610,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:13:00 PM,14,2,1, +1881575,101553650,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:20:00 PM,14,2,1, +1881576,101553707,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:29:00 PM,14,2,1, +1881577,101553715,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:52:00 PM,14,2,1, +1881578,101553723,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1881579,101553745,40,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881580,101553828,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881581,101553843,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881582,101553850,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:17:00 PM,14,2,1, +1881583,101553860,2,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:49:00 PM,14,2,1, +1881584,101555053,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:16:00 PM,14,2,1, +1881585,101555142,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1881586,101555231,14,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:13:00 PM,14,2,1, +1881587,101555233,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1881588,101555304,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:55:00 PM,14,2,1, +1881589,101555332,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:17:00 PM,14,2,1, +1881590,101555355,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1881591,101555358,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881592,101555372,14,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1881593,101555376,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1881594,101555457,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881595,101555499,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1881596,101555535,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:02:00 PM,14,2,1, +1881597,101555565,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:08:00 PM,14,2,1, +1881598,101555613,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:41:00 PM,14,2,1, +1881599,101555619,14,2194769,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 8:12:00 PM,14,2,1, +1881600,101555705,14,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1881601,101555765,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:04:00 PM,14,2,1, +1881602,101555802,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:22:00 PM,14,2,1, +1881603,101555853,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881604,101555866,1,2194769,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:10:00 PM,14,2,1, +1881605,101535769,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1881606,101536102,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:04:00 PM,14,2,1, +1881607,101536104,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881608,101536377,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:04:00 PM,14,2,1, +1881609,101536557,3,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:04:00 PM,14,2,1, +1881610,101537320,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:04:00 PM,14,2,1, +1881611,101537546,3,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1881612,101538117,3,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:02:00 PM,14,2,1, +1881613,101538146,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1881614,101538441,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:34:00 PM,14,2,1, +1881615,101538740,2,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1881616,101538997,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:02:00 PM,14,2,1, +1881617,101539014,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1881618,101539077,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:50:00 PM,14,2,1, +1881619,101539084,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:53:00 PM,14,2,1, +1881620,101539165,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:02:00 PM,14,2,1, +1881621,101539293,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:05:00 PM,14,2,1, +1881622,101539702,2,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1881623,101539865,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:14:00 PM,14,2,1, +1881624,101540116,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:31:00 PM,14,2,1, +1881625,101540119,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881626,101540138,2,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:55:00 PM,14,2,1, +1881627,101540236,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1881628,101540533,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1881629,101540590,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1881630,101540926,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1881631,101541039,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1881632,101541472,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:03:00 PM,14,2,1, +1881633,101541515,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1881634,101541679,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:04:00 PM,14,2,1, +1881635,101541763,3,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1881636,101541969,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:02:00 PM,14,2,1, 
+1881637,101542001,14,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:05:00 PM,14,2,1, +1881638,101542041,2,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1881639,101542088,2,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:33:00 PM,14,2,1, +1881640,101542108,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1881641,101542208,1,2197447,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881642,101552435,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1881643,101552436,3,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:04:00 PM,14,2,1, +1881644,101552481,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1881645,101552511,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1881646,101552520,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1881647,101552537,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881648,101552567,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881649,101552594,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:27:00 PM,14,2,1, +1881650,101552615,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:45:00 PM,14,2,1, +1881651,101552618,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881652,101552634,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:11:00 PM,14,2,1, +1881653,101552636,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:10:00 PM,14,2,1, +1881654,101552645,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:08:00 PM,14,2,1, +1881655,101552703,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:12:00 PM,14,2,1, +1881656,101552729,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881657,101552745,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:31:00 PM,14,2,1, +1881658,101552772,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881659,101552773,3,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1881660,101552785,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:30:00 PM,14,2,1, +1881661,101552813,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:56:00 PM,14,2,1, +1881662,101552821,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:29:00 PM,14,2,1, +1881663,101552823,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1881664,101552851,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881665,101552928,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1881666,101552982,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:31:00 PM,14,2,1, +1881667,101553029,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881668,101553077,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:12:00 PM,14,2,1, +1881669,101553102,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:41:00 PM,14,2,1, +1881670,101553216,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:31:00 PM,14,2,1, +1881671,101553223,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1881672,101553237,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:34:00 PM,14,2,1, +1881673,101553239,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881674,101553312,1,2194352,92476,2169650,10/8/2021 
10:50:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881675,101553327,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881676,101553333,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:47:00 PM,14,2,1, +1881677,101553392,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:28:00 PM,14,2,1, +1881678,101553413,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:03:00 PM,14,2,1, +1881679,101553424,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881680,101553466,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:56:00 PM,14,2,1, +1881681,101553500,3,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:42:00 PM,14,2,1, +1881682,101553551,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:37:00 PM,14,2,1, +1881683,101553554,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881684,101553574,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:21:00 PM,14,2,1, +1881685,101553598,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1881686,101553669,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1881687,101553679,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:10:00 PM,14,2,1, +1881688,101553681,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:58:00 PM,14,2,1, +1881689,101553718,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:59:00 PM,14,2,1, +1881690,101553727,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:04:00 PM,14,2,1, +1881691,101553739,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1881692,101553791,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881693,101553836,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881694,101553844,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:38:00 PM,14,2,1, +1881695,101553856,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:56:00 PM,14,2,1, +1881696,101553870,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:20:00 PM,14,2,1, +1881697,101553870,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881698,101553897,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1881699,101553898,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881700,101555080,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881701,101555089,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1881702,101555125,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:27:00 PM,14,2,1, +1881703,101555132,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1881704,101555134,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881705,101555138,3,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1881706,101555163,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881707,101555300,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881708,101555368,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:14:00 PM,14,2,1, +1881709,101555370,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:38:00 PM,14,2,1, +1881710,101555374,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:51:00 PM,14,2,1, +1881711,101555386,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:01:00 PM,14,2,1, 
+1881712,101555396,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:39:00 PM,14,2,1, +1881713,101555403,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881714,101555408,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881715,101555444,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881716,101555445,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:56:00 PM,14,2,1, +1881717,101555446,3,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1881718,101555448,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:46:00 PM,14,2,1, +1881719,101555450,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1881720,101555459,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:59:00 PM,14,2,1, +1881721,101555464,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:12:00 PM,14,2,1, +1881722,101555490,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:01:00 PM,14,2,1, +1881723,101555563,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:32:00 PM,14,2,1, +1881724,101555628,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:56:00 PM,14,2,1, +1881725,101555631,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1881726,101555634,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:28:00 PM,14,2,1, +1881727,101555660,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881728,101555661,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881729,101555687,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881730,101555753,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881731,101555770,2,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1881732,101555783,40,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881733,101555787,1,2194352,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1881734,101535403,14,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1881735,101535441,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:29:00 PM,14,2,1, +1881736,101535475,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:08:00 PM,14,2,1, +1881737,101535507,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:04:00 PM,14,2,1, +1881738,101535508,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1881739,101535536,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:00:00 PM,14,2,1, +1881740,101535678,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:02:00 PM,14,2,1, +1881741,101535739,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1881742,101535753,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:04:00 PM,14,2,1, +1881743,101535807,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:16:00 PM,14,2,1, +1881744,101535808,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:26:00 PM,14,2,1, +1881745,101535900,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:21:00 PM,14,2,1, +1881746,101535941,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:39:00 PM,14,2,1, +1881747,101535957,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1881748,101536003,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:44:00 PM,14,2,1, +1881749,101536109,1,2188252,92476,2169650,10/8/2021 10:50:00 
PM,10/8/2021 8:26:00 PM,14,2,1, +1881750,101536150,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:41:00 PM,14,2,1, +1881751,101536253,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:36:00 PM,14,2,1, +1881752,101536370,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881753,101536381,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:19:00 PM,14,2,1, +1881754,101536555,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1881755,101536952,40,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1881756,101536976,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1881757,101536993,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:45:00 PM,14,2,1, +1881758,101537082,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:14:00 PM,14,2,1, +1881759,101537090,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:13:00 PM,14,2,1, +1881760,101537117,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:00:00 PM,14,2,1, +1881761,101537180,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:55:00 PM,14,2,1, +1881762,101537233,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:24:00 PM,14,2,1, +1881763,101537299,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:34:00 PM,14,2,1, +1881764,101537430,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:47:00 PM,14,2,1, +1881765,101537511,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:04:00 PM,14,2,1, +1881766,101537585,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:02:00 PM,14,2,1, +1881767,101537597,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1881768,101537672,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881769,101537731,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:09:00 PM,14,2,1, +1881770,101537749,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1881771,101537837,10,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1881772,101537841,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:37:00 PM,14,2,1, +1881773,101537900,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:50:00 PM,14,2,1, +1881774,101537969,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:40:00 PM,14,2,1, +1881775,101537996,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:17:00 PM,14,2,1, +1881776,101538116,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:50:00 PM,14,2,1, +1881777,101538129,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1881778,101538156,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1881779,101538274,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:25:00 PM,14,2,1, +1881780,101538351,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1881781,101538426,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1881782,101538532,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:30:00 PM,14,2,1, +1881783,101538581,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:38:00 PM,14,2,1, +1881784,101538646,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1881785,101538765,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:24:00 PM,14,2,1, +1881786,101538911,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:09:00 PM,14,2,1, 
+1881787,101538943,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:09:00 PM,14,2,1, +1881788,101539220,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:58:00 PM,14,2,1, +1881789,101539399,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:24:00 PM,14,2,1, +1881790,101539459,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1881791,101539524,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1881792,101539543,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:46:00 PM,14,2,1, +1881793,101539560,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881794,101539587,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:30:00 PM,14,2,1, +1881795,101539659,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:27:00 PM,14,2,1, +1881796,101539745,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:55:00 PM,14,2,1, +1881797,101539765,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:28:00 PM,14,2,1, +1881798,101539846,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:01:00 PM,14,2,1, +1881799,101539872,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1881800,101539905,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1881801,101539965,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:20:00 PM,14,2,1, +1881802,101540007,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:47:00 PM,14,2,1, +1881803,101540090,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:59:00 PM,14,2,1, +1881804,101540102,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:35:00 PM,14,2,1, +1881805,101540185,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:48:00 PM,14,2,1, +1881806,101540191,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881807,101540216,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1881808,101540226,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1881809,101540327,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1881810,101540343,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:01:00 PM,14,2,1, +1881811,101540433,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1881812,101540460,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:15:00 PM,14,2,1, +1881813,101540561,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1881814,101540572,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881815,101540614,40,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1881816,101540653,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:10:00 PM,14,2,1, +1881817,101540669,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:46:00 PM,14,2,1, +1881818,101540792,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:17:00 PM,14,2,1, +1881819,101540843,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:20:00 PM,14,2,1, +1881820,101541161,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:09:00 PM,14,2,1, +1881821,101541248,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:51:00 PM,14,2,1, +1881822,101541249,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:50:00 PM,14,2,1, +1881823,101541353,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:45:00 PM,14,2,1, +1881824,101541354,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 
8:39:00 PM,14,2,1, +1881825,101541365,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1881826,101541512,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1881827,101541595,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1881828,101541649,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1881829,101541793,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:14:00 PM,14,2,1, +1881830,101541856,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:30:00 PM,14,2,1, +1881831,101541867,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1881832,101541973,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 7:44:00 PM,14,2,1, +1881833,101541978,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:24:00 PM,14,2,1, +1881834,101542093,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 9:02:00 PM,14,2,1, +1881835,101542252,2,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:11:00 PM,14,2,1, +1881836,101542303,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:47:00 PM,14,2,1, +1881837,101542326,1,2188252,92476,2169650,10/8/2021 10:50:00 PM,10/8/2021 8:09:00 PM,14,2,1, +1881840,101548998,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:32:00 PM,14,2,1, +1881841,101552500,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:10:00 PM,14,2,1, +1881842,101552779,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:10:00 PM,14,2,1, +1881843,101548890,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:44:00 PM,14,2,1, +1881844,101549027,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:28:00 PM,14,2,1, +1881845,101552394,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:17:00 PM,14,2,1, +1881846,101549082,14,2196541,92476,2196541,10/8/2021 10:51:00 PM,10/8/2021 9:40:00 PM,14,2,1, +1881847,101552151,14,2196541,92476,2196541,10/8/2021 10:51:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1881848,101552387,14,2196541,92476,2196541,10/8/2021 10:51:00 PM,10/8/2021 7:29:00 PM,14,2,1, +1881849,101548561,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:37:00 PM,14,2,1, +1881850,101548839,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:08:00 PM,14,2,1, +1881851,101549279,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:06:00 PM,14,2,1, +1881852,101551932,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1881853,101552351,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:48:00 PM,14,2,1, +1881854,101548433,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1881855,101548539,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:34:00 PM,14,2,1, +1881856,101548756,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:25:00 PM,14,2,1, +1881857,101548816,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1881858,101548959,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:51:00 PM,14,2,1, +1881859,101548990,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:37:00 PM,14,2,1, +1881860,101549267,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881861,101552318,14,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:02:00 PM,14,2,1, +1881862,101552723,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:32:00 PM,14,2,1, +1881863,101552725,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:02:00 PM,14,2,1, 
+1881864,101552748,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:44:00 PM,14,2,1, +1881865,101552976,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:41:00 PM,14,2,1, +1881866,101552997,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:25:00 PM,14,2,1, +1881867,101553320,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1881868,101555102,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881869,101555278,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1881870,101555565,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1881871,101555866,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:50:00 PM,14,2,1, +1881872,101553415,14,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:29:00 PM,14,2,1, +1881873,101548543,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1881874,101548711,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:44:00 PM,14,2,1, +1881875,101548825,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:35:00 PM,14,2,1, +1881876,101548921,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:15:00 PM,14,2,1, +1881877,101548925,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:26:00 PM,14,2,1, +1881878,101548976,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881879,101548992,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:55:00 PM,14,2,1, +1881880,101549008,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1881881,101549018,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:15:00 PM,14,2,1, +1881882,101549199,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:24:00 PM,14,2,1, +1881883,101551919,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:08:00 PM,14,2,1, +1881884,101552004,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1881885,101552085,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:53:00 PM,14,2,1, +1881886,101552183,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:39:00 PM,14,2,1, +1881887,101552324,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1881888,101552349,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1881889,101548436,14,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:08:00 PM,14,2,1, +1881890,101548490,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:44:00 PM,14,2,1, +1881891,101548557,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881892,101548577,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:34:00 PM,14,2,1, +1881893,101548738,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:35:00 PM,14,2,1, +1881894,101548748,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1881895,101548767,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881896,101548842,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:38:00 PM,14,2,1, +1881897,101548895,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:42:00 PM,14,2,1, +1881898,101548977,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:03:00 PM,14,2,1, +1881899,101549048,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:36:00 PM,14,2,1, +1881900,101549070,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:25:00 PM,14,2,1, 
+1881901,101549117,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:07:00 PM,14,2,1, +1881902,101549139,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1881903,101549238,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881904,101552152,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:50:00 PM,14,2,1, +1881905,101552202,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:23:00 PM,14,2,1, +1881906,101552266,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1881907,101552305,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:38:00 PM,14,2,1, +1881908,101548421,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:36:00 PM,14,2,1, +1881909,101548494,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:00:00 PM,14,2,1, +1881910,101548512,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:21:00 PM,14,2,1, +1881911,101548777,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:53:00 PM,14,2,1, +1881912,101548404,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:24:00 PM,14,2,1, +1881913,101548407,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1881914,101548479,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:34:00 PM,14,2,1, +1881915,101548528,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:33:00 PM,14,2,1, +1881916,101548549,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1881917,101548560,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:23:00 PM,14,2,1, +1881918,101548566,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:28:00 PM,14,2,1, +1881919,101548570,23,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:57:00 PM,14,2,1, +1881920,101548580,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:22:00 PM,14,2,1, +1881921,101548682,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:49:00 PM,14,2,1, +1881922,101548700,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:23:00 PM,14,2,1, +1881923,101548729,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:26:00 PM,14,2,1, +1881924,101548758,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:45:00 PM,14,2,1, +1881925,101548782,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1881926,101548862,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:49:00 PM,14,2,1, +1881927,101548903,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1881928,101548978,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1881929,101549085,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:27:00 PM,14,2,1, +1881930,101549122,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:11:00 PM,14,2,1, +1881931,101549125,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:24:00 PM,14,2,1, +1881932,101549151,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:09:00 PM,14,2,1, +1881933,101549164,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1881934,101549196,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1881935,101549395,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:28:00 PM,14,2,1, +1881936,101549411,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881937,101551943,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:22:00 PM,14,2,1, 
+1881938,101551975,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881939,101552011,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:41:00 PM,14,2,1, +1881940,101552090,10,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1881941,101552154,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:29:00 PM,14,2,1, +1881942,101552251,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:11:00 PM,14,2,1, +1881943,101552284,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:42:00 PM,14,2,1, +1881944,101552329,14,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:51:00 PM,14,2,1, +1881945,101552369,1,2196541,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1881946,101548434,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1881947,101548443,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:17:00 PM,14,2,1, +1881948,101548487,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:16:00 PM,14,2,1, +1881949,101548542,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:48:00 PM,14,2,1, +1881950,101548550,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:35:00 PM,14,2,1, +1881951,101548572,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1881952,101548583,3,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1881953,101548611,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 10:22:00 PM,14,2,1, +1881954,101548631,2,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1881955,101548709,3,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:13:00 PM,14,2,1, +1881956,101548743,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1881957,101548766,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:19:00 PM,14,2,1, +1881958,101548791,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 6:57:00 PM,14,2,1, +1881959,101548808,3,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1881960,101548824,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 6:46:00 PM,14,2,1, +1881961,101548838,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:10:00 PM,14,2,1, +1881962,101548847,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 10:25:00 PM,14,2,1, +1881963,101548848,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:09:00 PM,14,2,1, +1881964,101548982,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1881965,101548995,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:19:00 PM,14,2,1, +1881966,101549058,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:20:00 PM,14,2,1, +1881967,101549062,2,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:21:00 PM,14,2,1, +1881968,101549201,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1881969,101549225,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:23:00 PM,14,2,1, +1881970,101549248,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 10:20:00 PM,14,2,1, +1881971,101549268,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:34:00 PM,14,2,1, +1881972,101549412,14,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 10:35:00 PM,14,2,1, +1881973,101549416,3,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:04:00 PM,14,2,1, +1881974,101549418,10,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:56:00 PM,14,2,1, +1881975,101551923,1,2196542,92476,2196542,10/8/2021 10:51:00 
PM,10/8/2021 8:02:00 PM,14,2,1, +1881976,101551971,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:11:00 PM,14,2,1, +1881977,101551977,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 6:52:00 PM,14,2,1, +1881978,101551986,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 6:41:00 PM,14,2,1, +1881979,101551999,40,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 10:05:00 PM,14,2,1, +1881980,101552095,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:36:00 PM,14,2,1, +1881981,101552111,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:02:00 PM,14,2,1, +1881982,101552171,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:30:00 PM,14,2,1, +1881983,101552215,2,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:29:00 PM,14,2,1, +1881984,101552254,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1881985,101552257,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:02:00 PM,14,2,1, +1881986,101552268,3,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:53:00 PM,14,2,1, +1881987,101552277,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:17:00 PM,14,2,1, +1881988,101552282,10,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1881989,101552300,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:11:00 PM,14,2,1, +1881990,101552326,40,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 10:36:00 PM,14,2,1, +1881991,101552333,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 9:05:00 PM,14,2,1, +1881992,101552335,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 7:30:00 PM,14,2,1, +1881993,101552348,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:43:00 PM,14,2,1, +1881994,101552376,3,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 10:02:00 PM,14,2,1, +1881995,101552397,1,2196542,92476,2196542,10/8/2021 10:51:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1881996,101548416,2,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:36:00 PM,14,2,1, +1881997,101548427,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:16:00 PM,14,2,1, +1881998,101548458,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1881999,101548474,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:31:00 PM,14,2,1, +1882000,101548476,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1882001,101548529,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:42:00 PM,14,2,1, +1882002,101548530,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:03:00 PM,14,2,1, +1882003,101548536,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1882004,101548555,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1882005,101548606,23,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:01:00 PM,14,2,1, +1882006,101548607,2,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:49:00 PM,14,2,1, +1882007,101548624,23,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:40:00 PM,14,2,1, +1882008,101548630,23,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:40:00 PM,14,2,1, +1882009,101548663,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1882010,101548691,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:44:00 PM,14,2,1, +1882011,101548706,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882012,101548740,23,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:28:00 PM,14,2,1, 
+1882013,101548802,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882014,101548827,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882015,101548899,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:11:00 PM,14,2,1, +1882016,101548931,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:33:00 PM,14,2,1, +1882017,101548947,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882018,101548948,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882019,101548967,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882020,101548979,2,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:20:00 PM,14,2,1, +1882021,101549011,2,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:37:00 PM,14,2,1, +1882022,101549019,2,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:02:00 PM,14,2,1, +1882023,101549043,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882024,101549067,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1882025,101549083,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:50:00 PM,14,2,1, +1882026,101549126,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882027,101549127,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1882028,101549180,23,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:38:00 PM,14,2,1, +1882029,101549206,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882030,101549231,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:14:00 PM,14,2,1, +1882031,101549270,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1882032,101549280,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:54:00 PM,14,2,1, +1882033,101549427,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882034,101551931,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:04:00 PM,14,2,1, +1882035,101551944,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:55:00 PM,14,2,1, +1882036,101551952,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:40:00 PM,14,2,1, +1882037,101551952,10,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:44:00 PM,14,2,1, +1882038,101551974,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:30:00 PM,14,2,1, +1882039,101552014,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882040,101552016,40,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:52:00 PM,14,2,1, +1882041,101552091,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:01:00 PM,14,2,1, +1882042,101552104,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882043,101552138,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:10:00 PM,14,2,1, +1882044,101552182,2,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:42:00 PM,14,2,1, +1882045,101552184,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:41:00 PM,14,2,1, +1882046,101552212,23,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:19:00 PM,14,2,1, +1882047,101552225,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1882048,101552262,1,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 6:46:00 PM,14,2,1, +1882049,101552265,40,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:51:00 PM,14,2,1, +1882050,101552269,3,2197443,92476,2169650,10/8/2021 10:51:00 
PM,10/8/2021 8:05:00 PM,14,2,1, +1882051,101552310,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:22:00 PM,14,2,1, +1882052,101552341,3,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1882053,101552347,23,2197443,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1882054,101552429,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:22:00 PM,14,2,1, +1882055,101552460,23,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882056,101552465,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:15:00 PM,14,2,1, +1882057,101552483,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:54:00 PM,14,2,1, +1882058,101552502,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:06:00 PM,14,2,1, +1882059,101552526,40,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:03:00 PM,14,2,1, +1882060,101552538,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:37:00 PM,14,2,1, +1882061,101552560,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:19:00 PM,14,2,1, +1882062,101552573,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:55:00 PM,14,2,1, +1882063,101552604,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:53:00 PM,14,2,1, +1882064,101552640,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:27:00 PM,14,2,1, +1882065,101552667,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:00:00 PM,14,2,1, +1882066,101552709,23,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:06:00 PM,14,2,1, +1882067,101552731,40,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1882068,101552853,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:27:00 PM,14,2,1, +1882069,101552865,40,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:07:00 PM,14,2,1, +1882070,101552868,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:35:00 PM,14,2,1, +1882071,101552870,23,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:11:00 PM,14,2,1, +1882072,101552871,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:24:00 PM,14,2,1, +1882073,101552964,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:15:00 PM,14,2,1, +1882074,101552977,10,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:41:00 PM,14,2,1, +1882075,101553003,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:33:00 PM,14,2,1, +1882076,101553008,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:04:00 PM,14,2,1, +1882077,101553013,40,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:59:00 PM,14,2,1, +1882078,101553052,40,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1882079,101553131,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:57:00 PM,14,2,1, +1882080,101553156,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:59:00 PM,14,2,1, +1882081,101553171,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:18:00 PM,14,2,1, +1882082,101553230,40,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:53:00 PM,14,2,1, +1882083,101553267,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:33:00 PM,14,2,1, +1882084,101553349,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:18:00 PM,14,2,1, +1882085,101553377,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:29:00 PM,14,2,1, +1882086,101553391,23,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:07:00 PM,14,2,1, +1882087,101553547,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:20:00 PM,14,2,1, 
+1882088,101553562,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:25:00 PM,14,2,1, +1882089,101553593,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:04:00 PM,14,2,1, +1882090,101553611,23,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:11:00 PM,14,2,1, +1882091,101553745,40,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:54:00 PM,14,2,1, +1882092,101553788,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:12:00 PM,14,2,1, +1882093,101555114,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:28:00 PM,14,2,1, +1882094,101555152,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:11:00 PM,14,2,1, +1882095,101555231,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:56:00 PM,14,2,1, +1882096,101555233,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:05:00 PM,14,2,1, +1882097,101555251,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:22:00 PM,14,2,1, +1882098,101555282,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:48:00 PM,14,2,1, +1882099,101555287,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:44:00 PM,14,2,1, +1882100,101555291,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:31:00 PM,14,2,1, +1882101,101555301,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:26:00 PM,14,2,1, +1882102,101555308,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:13:00 PM,14,2,1, +1882103,101555344,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:08:00 PM,14,2,1, +1882104,101555363,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:57:00 PM,14,2,1, +1882105,101555381,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:02:00 PM,14,2,1, +1882106,101555417,2,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:21:00 PM,14,2,1, +1882107,101555457,23,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:52:00 PM,14,2,1, +1882108,101555515,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:52:00 PM,14,2,1, +1882109,101555535,23,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:12:00 PM,14,2,1, +1882110,101555618,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:12:00 PM,14,2,1, +1882111,101555656,1,2197446,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:05:00 PM,14,2,1, +1882112,101548532,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:22:00 PM,14,2,1, +1882113,101548615,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:47:00 PM,14,2,1, +1882114,101548705,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:15:00 PM,14,2,1, +1882115,101548754,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:22:00 PM,14,2,1, +1882116,101548792,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:58:00 PM,14,2,1, +1882117,101548905,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:37:00 PM,14,2,1, +1882118,101548925,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 7:21:00 PM,14,2,1, +1882119,101549199,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:23:00 PM,14,2,1, +1882120,101552093,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:38:00 PM,14,2,1, +1882121,101552129,10,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:38:00 PM,14,2,1, +1882122,101552191,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 10:10:00 PM,14,2,1, +1882123,101552258,1,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 9:46:00 PM,14,2,1, +1882124,101552364,10,2196542,92476,2169650,10/8/2021 10:51:00 PM,10/8/2021 8:03:00 PM,14,2,1, diff --git a/parsons/__init__.py b/parsons/__init__.py index 
bbf5513b30..93a2464488 100644 --- a/parsons/__init__.py +++ b/parsons/__init__.py @@ -1,8 +1,5 @@ -# Provide shortcuts to importing Parsons submodules and set up logging -import importlib -import logging import os - +import logging # Provide shortcuts to importing Parsons submodules @@ -58,7 +55,6 @@ from parsons.quickbase.quickbase import Quickbase from parsons.actblue.actblue import ActBlue from parsons.mobilecommons.mobilecommons import MobileCommons - from parsons.etl.table import Table __all__ = [ 'VAN', @@ -111,94 +107,20 @@ 'MobileCommons' ] - # Define the default logging config for Parsons and its submodules. For now the # logger gets a StreamHandler by default. At some point a NullHandler may be more # appropriate, so the end user must decide on logging behavior. logger = logging.getLogger(__name__) _handler = logging.StreamHandler() -_formatter = logging.Formatter("%(module)s %(levelname)s %(message)s") +_formatter = logging.Formatter('%(module)s %(levelname)s %(message)s') _handler.setFormatter(_formatter) logger.addHandler(_handler) -if os.environ.get("TESTING"): +if os.environ.get('TESTING'): # Log less stuff in automated tests - logger.setLevel("WARNING") -elif os.environ.get("DEBUG"): - logger.setLevel("DEBUG") + logger.setLevel('WARNING') +elif os.environ.get('DEBUG'): + logger.setLevel('DEBUG') else: - logger.setLevel("INFO") - -# Table is referenced by many connectors, so we add it immediately to limit the damage -# of circular dependencies -__all__ = ["Table"] -for module_path, connector_name in ( - ("parsons.actblue.actblue", "ActBlue"), - ("parsons.action_kit.action_kit", "ActionKit"), - ("parsons.action_builder.action_builder", "ActionBuilder"), - ("parsons.action_network.action_network", "ActionNetwork"), - ("parsons.airtable.airtable", "Airtable"), - ("parsons.alchemer.alchemer", "Alchemer"), - ("parsons.alchemer.alchemer", "SurveyGizmo"), - ("parsons.auth0.auth0", "Auth0"), - ("parsons.aws.s3", "S3"), - ("parsons.azure.azure_blob_storage", "AzureBlobStorage"), - ("parsons.bill_com.bill_com", "BillCom"), - ("parsons.bloomerang.bloomerang", "Bloomerang"), - ("parsons.bluelink", "Bluelink"), - ("parsons.box.box", "Box"), - ("parsons.braintree.braintree", "Braintree"), - ("parsons.capitol_canary.capitol_canary", "CapitolCanary"), - ("parsons.civis.civisclient", "CivisClient"), - ("parsons.controlshift.controlshift", "Controlshift"), - ("parsons.copper.copper", "Copper"), - ("parsons.crowdtangle.crowdtangle", "CrowdTangle"), - ("parsons.databases.database_connector", "DatabaseConnector"), - ("parsons.databases.discover_database", "discover_database"), - ("parsons.databases.db_sync", "DBSync"), - ("parsons.databases.mysql.mysql", "MySQL"), - ("parsons.databases.postgres.postgres", "Postgres"), - ("parsons.databases.redshift.redshift", "Redshift"), - ("parsons.donorbox.donorbox", "Donorbox"), - ("parsons.facebook_ads.facebook_ads", "FacebookAds"), - ("parsons.freshdesk.freshdesk", "Freshdesk"), - ("parsons.geocode.census_geocoder", "CensusGeocoder"), - ("parsons.github.github", "GitHub"), - ("parsons.google.google_admin", "GoogleAdmin"), - ("parsons.google.google_bigquery", "GoogleBigQuery"), - ("parsons.google.google_civic", "GoogleCivic"), - ("parsons.google.google_cloud_storage", "GoogleCloudStorage"), - ("parsons.google.google_sheets", "GoogleSheets"), - ("parsons.hustle.hustle", "Hustle"), - ("parsons.mailchimp.mailchimp", "Mailchimp"), - ("parsons.mobilize_america.ma", "MobilizeAmerica"), - ("parsons.nation_builder.nation_builder", "NationBuilder"), - 
("parsons.newmode.newmode", "Newmode"), - ("parsons.ngpvan.van", "VAN"), - ("parsons.notifications.gmail", "Gmail"), - ("parsons.notifications.slack", "Slack"), - ("parsons.notifications.smtp", "SMTP"), - ("parsons.pdi.pdi", "PDI"), - ("parsons.phone2action.p2a", "Phone2Action"), - ("parsons.quickbase.quickbase", "Quickbase"), - ("parsons.redash.redash", "Redash"), - ("parsons.rockthevote.rtv", "RockTheVote"), - ("parsons.salesforce.salesforce", "Salesforce"), - ("parsons.scytl.scytl", "Scytl"), - ("parsons.sftp.sftp", "SFTP"), - ("parsons.shopify.shopify", "Shopify"), - ("parsons.sisense.sisense", "Sisense"), - ("parsons.targetsmart.targetsmart_api", "TargetSmartAPI"), - ("parsons.targetsmart.targetsmart_automation", "TargetSmartAutomation"), - ("parsons.turbovote.turbovote", "TurboVote"), - ("parsons.twilio.twilio", "Twilio"), - ("parsons.zoom.zoom", "Zoom"), -): - try: - globals()[connector_name] = getattr( - importlib.import_module(module_path), connector_name - ) - __all__.append(connector_name) - except ImportError: - logger.debug(f"Could not import {module_path}.{connector_name}; skipping") + logger.setLevel('INFO') diff --git a/parsons/actblue/__init__.py b/parsons/actblue/__init__.py index 385bce3cd2..f06b65b856 100644 --- a/parsons/actblue/__init__.py +++ b/parsons/actblue/__init__.py @@ -1,3 +1,5 @@ from parsons.actblue.actblue import ActBlue -__all__ = ["ActBlue"] +__all__ = [ + 'ActBlue' +] diff --git a/parsons/actblue/actblue.py b/parsons/actblue/actblue.py index a6d9f57281..5b05543f56 100644 --- a/parsons/actblue/actblue.py +++ b/parsons/actblue/actblue.py @@ -31,27 +31,18 @@ class ActBlue(object): visit https://secure.actblue.com/docs/csv_api#authentication. """ - def __init__( - self, actblue_client_uuid=None, actblue_client_secret=None, actblue_uri=None - ): - self.actblue_client_uuid = check_env.check( - "ACTBLUE_CLIENT_UUID", actblue_client_uuid - ) - self.actblue_client_secret = check_env.check( - "ACTBLUE_CLIENT_SECRET", actblue_client_secret - ) - self.uri = ( - check_env.check("ACTBLUE_URI", actblue_uri, optional=True) - or ACTBLUE_API_ENDPOINT - ) + def __init__(self, actblue_client_uuid=None, actblue_client_secret=None, actblue_uri=None): + self.actblue_client_uuid = check_env.check('ACTBLUE_CLIENT_UUID', actblue_client_uuid) + self.actblue_client_secret = check_env.check('ACTBLUE_CLIENT_SECRET', actblue_client_secret) + self.uri = check_env.check( + 'ACTBLUE_URI', actblue_uri, optional=True + ) or ACTBLUE_API_ENDPOINT self.headers = { "accept": "application/json", } - self.client = APIConnector( - self.uri, - auth=(self.actblue_client_uuid, self.actblue_client_secret), - headers=self.headers, - ) + self.client = APIConnector(self.uri, + auth=(self.actblue_client_uuid, self.actblue_client_secret), + headers=self.headers) def post_request(self, csv_type=None, date_range_start=None, date_range_end=None): """ @@ -84,11 +75,9 @@ def post_request(self, csv_type=None, date_range_start=None, date_range_end=None body = { "csv_type": csv_type, "date_range_start": date_range_start, - "date_range_end": date_range_end, + "date_range_end": date_range_end } - logger.info( - f"Requesting {csv_type} from {date_range_start} up to {date_range_end}." 
- ) + logger.info(f'Requesting {csv_type} from {date_range_start} up to {date_range_end}.') response = self.client.post_request(url="csvs", json=body) return response @@ -106,7 +95,7 @@ def get_download_url(self, csv_id=None): """ response = self.client.get_request(url=f"csvs/{csv_id}") - return response["download_url"] + return response['download_url'] def poll_for_download_url(self, csv_id): """ @@ -123,14 +112,14 @@ def poll_for_download_url(self, csv_id): it expires, it could be used by anyone to download the CSV. """ - logger.info("Request received. Please wait while ActBlue generates this data.") + logger.info('Request received. Please wait while ActBlue generates this data.') download_url = None while download_url is None: download_url = self.get_download_url(csv_id) time.sleep(POLLING_DELAY) - logger.info("Completed data generation.") - logger.info("Beginning conversion to Parsons Table.") + logger.info('Completed data generation.') + logger.info('Beginning conversion to Parsons Table.') return download_url def get_contributions(self, csv_type, date_range_start, date_range_end): @@ -160,11 +149,9 @@ def get_contributions(self, csv_type, date_range_start, date_range_end): Contents of the generated contribution CSV as a Parsons table. """ - post_request_response = self.post_request( - csv_type, date_range_start, date_range_end - ) + post_request_response = self.post_request(csv_type, date_range_start, date_range_end) csv_id = post_request_response["id"] download_url = self.poll_for_download_url(csv_id) table = Table.from_csv(download_url) - logger.info("Completed conversion to Parsons Table.") + logger.info('Completed conversion to Parsons Table.') return table diff --git a/parsons/action_builder/__init__.py b/parsons/action_builder/__init__.py deleted file mode 100644 index 82c465cf6c..0000000000 --- a/parsons/action_builder/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from parsons.action_builder.action_builder import ActionBuilder - -__all__ = ["ActionBuilder"] diff --git a/parsons/action_builder/action_builder.py b/parsons/action_builder/action_builder.py deleted file mode 100644 index da6fb7c59b..0000000000 --- a/parsons/action_builder/action_builder.py +++ /dev/null @@ -1,421 +0,0 @@ -import json -from parsons import Table -from parsons.utilities import check_env -from parsons.utilities.api_connector import APIConnector -import logging - -logger = logging.getLogger(__name__) - -API_URL = "https://{subdomain}.actionbuilder.org/api/rest/v1" - - -class ActionBuilder(object): - """ - `Args:` - api_token: str - The OSDI API token - subdomain: str - The part of the web app URL preceding '.actionbuilder.org' - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be retrieved - or edited. Can also be supplied in individual methods in case multiple campaigns need - to be referenced. 
- """ - - def __init__(self, api_token=None, subdomain=None, campaign=None): - self.api_token = check_env.check("ACTION_BUILDER_API_TOKEN", api_token) - self.headers = { - "Content-Type": "application/json", - "OSDI-API-Token": self.api_token, - } - self.api_url = API_URL.format(subdomain=subdomain) - self.api = APIConnector(self.api_url, headers=self.headers) - self.campaign = campaign - - def _campaign_check(self, campaign): - # Raise an error if campaign is not provided via instatiation nor method argument - - final_campaign = campaign or self.campaign - if not final_campaign: - raise ValueError("No campaign provided!") - - return final_campaign - - def _get_page(self, campaign, object_name, page, per_page=25, filter=None): - # Returns data from one page of results - - campaign = self._campaign_check(campaign) - - if per_page > 25: - per_page = 25 - logger.info( - "Action Builder's API will not return more than 25 entries per page. \ - Changing per_page parameter to 25." - ) - - params = {"page": page, "per_page": per_page, "filter": filter} - - url = f"campaigns/{campaign}/{object_name}" - - return self.api.get_request(url=url, params=params) - - def _get_all_records( - self, campaign, object_name, limit=None, per_page=25, filter=None - ): - # Returns a list of entries for a given object, such as people, tags, or connections. - # See Action Builder API docs for more: https://www.actionbuilder.org/docs/v1/index.html - - count = 0 - page = 1 - return_list = [] - - # Keep getting the next page until record limit is exceeded or an empty result returns - while True: - # Get this page and increase page number to the next one - response = self._get_page( - campaign, object_name, page, per_page, filter=filter - ) - page = page + 1 - - # Check that there's actually data - response_list = response.get("_embedded", {}).get(f"osdi:{object_name}") - - if not response_list: - # This page has no data, so we're done - return Table(return_list) - - # Assuming there's data, add it to the running response list - return_list.extend(response_list) - count = count + len(response_list) - if limit: - if count >= limit: - # Limit reached or exceeded, so return just the requested limit amount - return Table(return_list[0:limit]) - - def get_campaign_tags(self, campaign=None, limit=None, per_page=25, filter=None): - """ - Retrieve all tags (i.e. custom field values) within provided limit and filters - `Args:` - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. - limit: int - The number of entries to return. When None, returns all entries. - per_page: int - The number of entries per page to return. 25 maximum and default. - filter - The OData query for filtering results. E.g. "modified_date gt '2014-03-25'". - When None, no filter is applied. - `Returns:` - Parsons Table of full set of tags available in Action Builder. - """ - - return self._get_all_records( - campaign, "tags", limit=limit, per_page=per_page, filter=filter - ) - - def get_tag_by_name(self, tag_name, campaign=None): - """ - Convenience method to retrieve data on a single tag by its name/value - `Args:` - tag_name: str - The value of the tag to search for. - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. - `Returns:` - Parsons Table of data found on tag in Action Builder from searching by name. 
- """ - - filter = f"name eq '{tag_name}'" - - return self.get_campaign_tags(campaign=campaign, filter=filter) - - def insert_new_tag(self, tag_name, tag_field, tag_section, campaign=None): - """ - Load a new tag value into Action Builder. Required before applying the value to any entity - records. - `Args:` - tag_name: str - The name of the new tag, i.e. the custom field value. - tag_field: str - The name of the tag category, i.e. the custom field name. - tag_section: str - The name of the tag section, i.e. the custom field group name. - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. - `Returns:` - Dict containing Action Builder tag data. - """ - - campaign = self._campaign_check(campaign) - url = f"campaigns/{campaign}/tags" - - data = { - "name": tag_name, - "action_builder:field": tag_field, - "action_builder:section": tag_section, - } - - return self.api.post_request(url=url, data=json.dumps(data)) - - def _upsert_entity(self, data, campaign): - # Internal method leveraging the record signup helper endpoint to upsert entity records - - url = f"campaigns/{campaign}/people" - - return self.api.post_request(url=url, data=json.dumps(data)) - - def insert_entity_record(self, entity_type, data=None, campaign=None): - """ - Load a new entity record in Action Builder of the type provided. - `Args:` - entity_type: str - The name of the record type being inserted. Required if identifiers are not - provided. - data: dict - The details to include on the record being upserted, to be included as the value - of the `person` key. See - [documentation for the Person Signup Helper](https://www.actionbuilder.org/docs/v1/person_signup_helper.html#post) - for examples, and - [the Person endpoint](https://www.actionbuilder.org/docs/v1/people.html#field-names) - for full entity object composition. - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. - `Returns:` - Dict containing Action Builder entity data. - """ # noqa: E501 - - name_keys = ("name", "action_builder:name", "given_name") - error = "Must provide data with name or given_name when inserting new record" - if not isinstance(data, dict): - raise ValueError(error) - name_check = [key for key in data.get("person", {}) if key in name_keys] - if not name_check: - raise ValueError(error) - - campaign = self._campaign_check(campaign) - - if not isinstance(data, dict): - data = {} - - if "person" not in data: - # The POST data must live inside of person key - data["person"] = {} - - data["person"]["action_builder:entity_type"] = entity_type - - return self._upsert_entity(data=data, campaign=campaign) - - def update_entity_record(self, identifier, data, campaign=None): - """ - Update an entity record in Action Builder based on the identifier passed. - `Args:` - identifier: str - The unique identifier for a record being updated. ID strings will need to begin - with the origin system, followed by a colon, e.g. `action_builder:abc123-...`. - data: dict - The details to include on the record being upserted, to be included as the value - of the `person` key. See - [documentation for the Person Signup Helper](https://www.actionbuilder.org/docs/v1/person_signup_helper.html#post) - for examples, and - [the Person endpoint](https://www.actionbuilder.org/docs/v1/people.html#field-names) - for full entity object composition. 
- campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. - `Returns:` - Dict containing Action Builder entity data. - """ # noqa: E501 - - campaign = self._campaign_check(campaign) - - if isinstance(identifier, str): - # Ensure identifier is a list, even though singular string is called for - identifier = [identifier] - - # Default to assuming identifier comes from Action Builder and add prefix if missing - identifiers = [ - f"action_builder:{id}" if ":" not in id else id for id in identifier - ] - - if not isinstance(data, dict): - data = {} - - if "person" not in data: - # The POST data must live inside of person key - data["person"] = {} - - data["person"]["identifiers"] = identifiers - - return self._upsert_entity(data=data, campaign=campaign) - - def add_section_field_values_to_record( - self, identifier, section, field_values, campaign=None - ): - """ - Add one or more tags (i.e. custom field value) to an existing entity record in Action - Builder. The tags, along with their field and section, must already exist (except for - date fields). - `Args:` - identifier: str - The unique identifier for a record being updated. ID strings will need to begin - with the origin system, followed by a colon, e.g. `action_builder:abc123-...`. - section: str - The name of the tag section, i.e. the custom field group name. - field_values: dict - A collection of field names and tags stored as keys and values. - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. - `Returns:` - Dict containing Action Builder entity data of the entity being tagged. - """ - - tag_data = [ - { - "action_builder:name": tag, - "action_builder:field": field, - "action_builder:section": section, - } - for field, tag in field_values.items() - ] - - data = {"add_tags": tag_data} - - return self.update_entity_record( - identifier=identifier, data=data, campaign=campaign - ) - - def remove_tagging( - self, - identifier=None, - tag_id=None, - tag_name=None, - tagging_id=None, - campaign=None, - ): - """ - Remove one or more tags (i.e. custom field value) from an existing entity or connection - record in Action Builder. The basis for this end point is the combination of the tag's - interact ID and that of the specific tagging. The tag ID can usually be determined from - the tag's name, and the tagging ID can be derived if the identifier of the entity or - connection record is supplied instead. - `Args:` - identifier: str - Optional. The unique identifier for an entity or connection record being updated. - If omitted, `tagging_id` must be provided. - tag_id: str - Optional. The unique identifier for the tag being removed. If omitted, `tag_name` - must be provided. - tag_name: str - Optional. The exact name of the tag being removed. May result in an error if - multiple tags (in different fields/sections) have the same name. If omitted, - `tag_id` must be provided. - tagging_id: str - Optional. The unique identifier for the specific application of the tag to an - individual entity or connection record. If omitted, `identifier` must be provided. - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. 
- `Returns:` - API response JSON which contains `{'message': 'Tag has been removed from Taggable - Logbook'}` if successful. - """ - - if {tag_name, tag_id} == {None}: - raise ValueError("Please supply a tag_name or tag_id!") - - if {identifier, tagging_id} == {None}: - raise ValueError( - "Please supply an entity or connection identifier, or a tagging id!" - ) - - campaign = self._campaign_check(campaign) - endpoint = "tags/{}/taggings" - - if tag_name and {tag_id, tagging_id} == {None}: - tag_data = self.get_tag_by_name(tag_name, campaign=campaign) - tag_count = tag_data.num_rows - - if tag_count > 1: - error_msg = f"Found {tag_count} tags with this name. " - error_msg += "Try with using the unique interact ID" - raise ValueError(error_msg) - - tag_id = tag_data["identifiers"][0][0].split(":")[1] - logger.info(f"Tag {tag_name} has ID {tag_id}") - - if tagging_id and not tag_id: - raise ValueError("Cannot search based on tagging ID alone.") - - if tag_id and not tagging_id: - taggings = self._get_all_records(self.campaign, endpoint.format(tag_id)) - taggings_filtered = taggings.select_rows( - lambda row: identifier - in row["_links"]["action_builder:connection"]["href"] - if row["item_type"] == "connection" - else identifier in row["osdi:person"]["href"] - ) - tagging_id = [ - x.split(":")[1] - for x in taggings_filtered["identifiers"][0] - if "action_builder" in x - ][0] - - logger.info(f"Removing tag {tag_id} from {identifier or tagging_id}") - return self.api.delete_request( - f"campaigns/{campaign}/{endpoint.format(tag_id)}/{tagging_id}" - ) - - def upsert_connection(self, identifiers, tag_data=None, campaign=None): - """ - Load or update a connection record in Action Builder between two existing entity records. - Only one connection record is allowed per pair of entities, so if the connection already - exists, this method will update, but will otherwise create a new connection record. - `Args:` - identifiers: list - A list of two unique identifier strings for records being connected. ID strings - will need to begin with the origin system, followed by a colon, e.g. - `action_builder:abc123-...`. Requires exactly two identifiers. - tag_data: list - List of dicts of tags to be added to the connection record (i.e. Connection Info). - See [documentation on Connection Helper](https://www.actionbuilder.org/docs/v1/connection_helper.html#post) - for examples. - campaign: str - Optional. The 36-character "interact ID" of the campaign whose data is to be - retrieved or edited. Not necessary if supplied when instantiating the class. - `Returns:` - Dict containing Action Builder connection data. 
- """ # noqa: E501 - - # Check that there are exactly two identifiers and that campaign is provided first - if not isinstance(identifiers, list): - raise ValueError("Must provide identifiers as a list") - - if len(identifiers) != 2: - raise ValueError("Must provide exactly two identifiers") - - campaign = self._campaign_check(campaign) - - url = f"campaigns/{campaign}/people/{identifiers[0]}/connections" - - data = { - "connection": { - # person_id is used even if entity is not Person - "person_id": identifiers[1] - } - } - - if tag_data: - if isinstance(tag_data, dict): - tag_data = [tag_data] - - if not isinstance(tag_data[0], dict): - raise ValueError("Must provide tag_data as a dict or list of dicts") - - data["add_tags"] = tag_data - - return self.api.post_request(url=url, data=json.dumps(data)) diff --git a/parsons/action_kit/__init__.py b/parsons/action_kit/__init__.py index b4ecd76c2a..e4cb3e1466 100644 --- a/parsons/action_kit/__init__.py +++ b/parsons/action_kit/__init__.py @@ -1,3 +1,5 @@ from parsons.action_kit.action_kit import ActionKit -__all__ = ["ActionKit"] +__all__ = [ + 'ActionKit' +] diff --git a/parsons/action_kit/action_kit.py b/parsons/action_kit/action_kit.py index 6e346f6674..5ee070258d 100644 --- a/parsons/action_kit/action_kit.py +++ b/parsons/action_kit/action_kit.py @@ -25,18 +25,18 @@ class ActionKit(object): env variable set. """ - _default_headers = { - "content-type": "application/json", - "accepts": "application/json", - } + _default_headers = {'content-type': 'application/json', + 'accepts': 'application/json'} def __init__(self, domain=None, username=None, password=None): - self.domain = check_env.check("ACTION_KIT_DOMAIN", domain) - self.username = check_env.check("ACTION_KIT_USERNAME", username) - self.password = check_env.check("ACTION_KIT_PASSWORD", password) + + self.domain = check_env.check('ACTION_KIT_DOMAIN', domain) + self.username = check_env.check('ACTION_KIT_USERNAME', username) + self.password = check_env.check('ACTION_KIT_PASSWORD', password) self.conn = self._conn() def _conn(self, default_headers=_default_headers): + client = requests.Session() client.auth = (self.username, self.password) client.headers.update(default_headers) @@ -45,10 +45,10 @@ def _conn(self, default_headers=_default_headers): def _base_endpoint(self, endpoint, entity_id=None): # Create the base endpoint URL - url = f"https://{self.domain}/rest/v1/{endpoint}/" + url = f'https://{self.domain}/rest/v1/{endpoint}/' if entity_id: - return url + f"{entity_id}/" + return url + f'{entity_id}/' return url def _base_get(self, endpoint, entity_id=None, exception_message=None, params=None): @@ -57,6 +57,7 @@ def _base_get(self, endpoint, entity_id=None, exception_message=None, params=Non resp = self.conn.get(self._base_endpoint(endpoint, entity_id), params=params) if exception_message and resp.status_code == 404: raise Exception(self.parse_error(resp, exception_message)) + return resp.json() def _base_post(self, endpoint, exception_message, return_full_json=False, **kwargs): @@ -69,8 +70,8 @@ def _base_post(self, endpoint, exception_message, return_full_json=False, **kwar # Some of the methods should just return pointer to location of created # object. 
- if "headers" in resp.__dict__ and not return_full_json: - return resp.__dict__["headers"]["Location"] + if 'headers' in resp.__dict__ and not return_full_json: + return resp.__dict__['headers']['Location'] # Not all responses return a json try: @@ -83,12 +84,12 @@ def parse_error(self, resp, exception_message): # AK provides some pretty robust/helpful error reporting. We should surface them with # our exceptions. - if "errors" in resp.json().keys(): - if isinstance(resp.json()["errors"], list): - exception_message += "\n" + ",".join(resp.json()["errors"]) + if 'errors' in resp.json().keys(): + if isinstance(resp.json()['errors'], list): + exception_message += '\n' + ','.join(resp.json()['errors']) else: - for k, v in resp.json()["errors"].items(): - exception_message += str("\n" + k + ": " + ",".join(v)) + for k, v in resp.json()['errors'].items(): + exception_message += str('\n' + k + ': ' + ','.join(v)) return exception_message @@ -103,9 +104,8 @@ def get_user(self, user_id): User json object """ - return self._base_get( - endpoint="user", entity_id=user_id, exception_message="User not found" - ) + return self._base_get(endpoint='user', entity_id=user_id, + exception_message='User not found') def get_user_fields(self): """ @@ -116,9 +116,9 @@ def get_user_fields(self): List of user fields """ - resp = self._base_get(endpoint="user/schema") + resp = self._base_get(endpoint='user/schema') - return list(resp["fields"].keys()) + return list(resp['fields'].keys()) def create_user(self, email, **kwargs): """ @@ -135,26 +135,8 @@ def create_user(self, email, **kwargs): User json object """ - return self._base_post( - endpoint="user", - exception_message="Could not create user", - email=email, - **kwargs, - ) - - def delete_actionfield(self, actionfield_id): - """ - Delete an actionfield. - - `Args:` - actionfield_id: int - The id of the actionfield to delete - `Returns:` - ``None`` - """ - - resp = self.conn.delete(self._base_endpoint("actionfield", actionfield_id)) - logger.info(f"{resp.status_code}: {actionfield_id}") + return self._base_post(endpoint='user', exception_message='Could not create user', + email=email, **kwargs) def update_user(self, user_id, **kwargs): """ @@ -171,10 +153,8 @@ def update_user(self, user_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("user", user_id), data=json.dumps(kwargs) - ) - logger.info(f"{resp.status_code}: {user_id}") + resp = self.conn.patch(self._base_endpoint('user', user_id), data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {user_id}') def get_event(self, event_id): """Get an event. @@ -206,12 +186,28 @@ def get_events(self, limit=None, **kwargs): .. code-block:: python - ak.get_events(name__contains="FirstName") + ak.get_events(fields__name__contains="FirstName") `Returns:` Parsons.Table The events data. """ - return self.paginated_get("event", limit=limit, **kwargs) + # "The maximum number of objects returned per request is 100. Use paging + # to get more objects." 
+ # (https://roboticdogs.actionkit.com/docs//manual/api/rest/overview.html#ordering) + # get `limit` events if it's provided, otherwise get 100 + kwargs["_limit"] = min(100, limit or 1_000_000_000) + json_data = self._base_get("event", params=kwargs) + data = json_data["objects"] + + next_url = json_data.get("meta", {}).get("next") + while next_url: + resp = self.conn.get(f'https://{self.domain}{next_url}') + data += resp.json().get("objects", []) + next_url = resp.json().get("meta", {}).get("next") + if limit and len(data) >= limit: + break + + return Table(data[:limit]) def update_event(self, event_id, **kwargs): """ @@ -228,67 +224,8 @@ def update_event(self, event_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("event", event_id), data=json.dumps(kwargs) - ) - logger.info(f"{resp.status_code}: {event_id}") - - def get_blackholed_email(self, email): - """ - Get a blackholed email. A blackholed email is an email that has been prevented from - receiving bulk and transactional emails from ActionKit. `Documentation `_. - - `Args:` - email: str - Blackholed email of the record to get. - `Returns`: - Parsons.Table - The blackholed email data. - """ - - return self.paginated_get("blackholedemail", email=email) - - def blackhole_email(self, email): - """ - Prevent an email from receiving bulk and transactional emails from ActionKit. - `Documentation `_. - - `Args:` - user_id: str - Email to blackhole - `Returns:` - API location of new resource - """ - - return self._base_post( - endpoint="blackholedemail", - exception_message="Could not blackhole email", - email=email, - ) - - def delete_user_data(self, email, **kwargs): - """ - Delete user data. - - `Args:` - email: str - Email of user to delete data - **kwargs: - Optional arguments and fields to pass to the client. A full list can be found - in the `ActionKit API Documentation `_. 
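# Editor's note: condensed sketch of the cursor-following pagination added to
# get_events() above. The API caps each response at 100 objects, so the loop
# walks meta["next"] until the limit is reached ("ak" is assumed instantiated).
kwargs = {"_limit": 100}
json_data = ak._base_get("event", params=kwargs)        # first page
data = json_data["objects"]
next_url = json_data.get("meta", {}).get("next")
while next_url:                                         # follow the cursor
    resp = ak.conn.get(f"https://{ak.domain}{next_url}")
    data += resp.json().get("objects", [])
    next_url = resp.json().get("meta", {}).get("next")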
- `Returns:` - API location of anonymized user - """ - - return self._base_post( - endpoint="eraser", - exception_message="Could not delete user data", - email=email, - **kwargs, - ) + resp = self.conn.patch(self._base_endpoint('event', event_id), data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {event_id}') def delete_user(self, user_id): """ @@ -301,8 +238,8 @@ def delete_user(self, user_id): ``None`` """ - resp = self.conn.delete(self._base_endpoint("user", user_id)) - logger.info(f"{resp.status_code}: {user_id}") + resp = self.conn.delete(self._base_endpoint('user', user_id)) + logger.info(f'{resp.status_code}: {user_id}') def get_campaign(self, campaign_id): """ @@ -315,11 +252,8 @@ def get_campaign(self, campaign_id): Campaign json object """ - return self._base_get( - endpoint="campaign", - entity_id=campaign_id, - exception_message="Campaign not found", - ) + return self._base_get(endpoint='campaign', entity_id=campaign_id, + exception_message='Campaign not found') def get_campaign_fields(self): """ @@ -330,8 +264,8 @@ def get_campaign_fields(self): List of campaign fields """ - resp = self._base_get(endpoint="campaign/schema") - return list(resp["fields"].keys()) + resp = self._base_get(endpoint='campaign/schema') + return list(resp['fields'].keys()) def create_campaign(self, name, **kwargs): """ @@ -348,12 +282,8 @@ def create_campaign(self, name, **kwargs): API location of new resource """ - return self._base_post( - endpoint="campaign", - exception_message="Could not create campaign", - name=name, - **kwargs, - ) + return self._base_post(endpoint='campaign', exception_message='Could not create campaign', + name=name, **kwargs) def get_event_create_page(self, event_create_page_id): """ @@ -366,11 +296,8 @@ def get_event_create_page(self, event_create_page_id): Event create page json object """ - return self._base_get( - endpoint="eventcreatepage", - entity_id=event_create_page_id, - exception_message="Event create page not found", - ) + return self._base_get(endpoint='eventcreatepage', entity_id=event_create_page_id, + exception_message='Event create page not found') def get_event_create_page_fields(self): """ @@ -381,8 +308,8 @@ def get_event_create_page_fields(self): List of event create page fields """ - resp = self._base_get(endpoint="eventcreatepage/schema") - return list(resp["fields"].keys()) + resp = self._base_get(endpoint='eventcreatepage/schema') + return list(resp['fields'].keys()) def create_event_create_page(self, name, campaign_id, title, **kwargs): """ @@ -403,14 +330,12 @@ def create_event_create_page(self, name, campaign_id, title, **kwargs): API location of new resource """ - return self._base_post( - endpoint="eventcreatepage", - exception_message="Could not create event create page", - campaign=f"/rest/v1/campaign/{campaign_id}/", - name=name, - title=title, - **kwargs, - ) + return self._base_post(endpoint='eventcreatepage', + exception_message='Could not create event create page', + campaign=f'/rest/v1/campaign/{campaign_id}/', + name=name, + title=title, + **kwargs) def get_event_create_form(self, event_create_form_id): """ @@ -423,11 +348,8 @@ def get_event_create_form(self, event_create_form_id): Event create form json object """ - return self._base_get( - endpoint="eventcreateform", - entity_id=event_create_form_id, - exception_message="Event create page not found", - ) + return self._base_get(endpoint='eventcreateform', entity_id=event_create_form_id, + exception_message='Event create page not found') def 
get_event_create_form_fields(self): """ @@ -438,8 +360,8 @@ def get_event_create_form_fields(self): List of event create form fields """ - resp = self._base_get(endpoint="eventcreateform/schema") - return list(resp["fields"].keys()) + resp = self._base_get(endpoint='eventcreateform/schema') + return list(resp['fields'].keys()) def create_event_create_form(self, page_id, thank_you_text, **kwargs): """ @@ -458,13 +380,11 @@ def create_event_create_form(self, page_id, thank_you_text, **kwargs): API location of new resource """ - return self._base_post( - endpoint="eventcreateform", - exception_message="Could not event create form", - page=f"/rest/v1/eventcreatepage/{page_id}/", - thank_you_text=thank_you_text, - **kwargs, - ) + return self._base_post(endpoint='eventcreateform', + exception_message='Could not event create form', + page=f'/rest/v1/eventcreatepage/{page_id}/', + thank_you_text=thank_you_text, + **kwargs) def get_event_signup_page(self, event_signup_page_id): """ @@ -477,11 +397,8 @@ def get_event_signup_page(self, event_signup_page_id): Event signup page json object """ - return self._base_get( - endpoint="eventsignuppage", - entity_id=event_signup_page_id, - exception_message="User page signup page not found", - ) + return self._base_get(endpoint='eventsignuppage', entity_id=event_signup_page_id, + exception_message='User page signup page not found') def get_event_signup_page_fields(self): """ @@ -492,8 +409,8 @@ def get_event_signup_page_fields(self): List of event signup page fields """ - resp = self._base_get(endpoint="eventsignuppage/schema") - return list(resp["fields"].keys()) + resp = self._base_get(endpoint='eventsignuppage/schema') + return list(resp['fields'].keys()) def create_event_signup_page(self, name, campaign_id, title, **kwargs): """ @@ -514,14 +431,12 @@ def create_event_signup_page(self, name, campaign_id, title, **kwargs): API location of new resource """ - return self._base_post( - endpoint="eventsignuppage", - exception_message="Could not create signup page", - campaign=f"/rest/v1/campaign/{campaign_id}/", - name=name, - title=title, - **kwargs, - ) + return self._base_post(endpoint='eventsignuppage', + exception_message='Could not create signup page', + campaign=f'/rest/v1/campaign/{campaign_id}/', + name=name, + title=title, + **kwargs) def get_event_signup_form(self, event_signup_form_id): """ @@ -534,11 +449,8 @@ def get_event_signup_form(self, event_signup_form_id): Event signup form json object """ - return self._base_get( - endpoint="eventsignupform", - entity_id=event_signup_form_id, - exception_message="User page signup form not found", - ) + return self._base_get(endpoint='eventsignupform', entity_id=event_signup_form_id, + exception_message='User page signup form not found') def get_event_signup_form_fields(self): """ @@ -549,8 +461,8 @@ def get_event_signup_form_fields(self): List of event signup form fields """ - resp = self._base_get(endpoint="eventsignupform/schema") - return list(resp["fields"].keys()) + resp = self._base_get(endpoint='eventsignupform/schema') + return list(resp['fields'].keys()) def create_event_signup_form(self, page_id, thank_you_text, **kwargs): """ @@ -569,13 +481,11 @@ def create_event_signup_form(self, page_id, thank_you_text, **kwargs): API location of new resource """ - return self._base_post( - endpoint="eventsignupform", - exception_message="Could not event create signup form", - page=f"/rest/v1/page/{page_id}/", - thank_you_text=thank_you_text, - **kwargs, - ) + return 
self._base_post(endpoint='eventsignupform', + exception_message='Could not event create signup form', + page=f'/rest/v1/page/{page_id}/', + thank_you_text=thank_you_text, + **kwargs) def update_event_signup(self, event_signup_id, **kwargs): """ @@ -594,10 +504,9 @@ def update_event_signup(self, event_signup_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("eventsignup", event_signup_id), data=json.dumps(kwargs) - ) - logger.info(f"{resp.status_code}: {event_signup_id}") + resp = self.conn.patch(self._base_endpoint('eventsignup', event_signup_id), + data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {event_signup_id}') def get_mailer(self, entity_id): """ @@ -610,7 +519,7 @@ def get_mailer(self, entity_id): Mailer json object """ - return self._base_get(endpoint="mailer", entity_id=entity_id) + return self._base_get(endpoint='mailer', entity_id=entity_id) def create_mailer(self, **kwargs): """ @@ -625,19 +534,16 @@ def create_mailer(self, **kwargs): URI of new mailer """ - return self._base_post( - endpoint="mailer", exception_message="Could not create mailer", **kwargs - ) + return self._base_post(endpoint='mailer', exception_message='Could not create mailer', + **kwargs) def copy_mailer(self, mailer_id): """ copy a mailer returns new copy of mailer which should be updatable. """ - resp = self.conn.post( - self._base_endpoint("mailer", entity_id=mailer_id) + "/copy" - ) - return resp + resp = self.conn.post(self._base_endpoint('mailer', entity_id=mailer_id) + '/copy') + return(resp) def update_mailing(self, mailer_id, **kwargs): """ @@ -654,10 +560,8 @@ def update_mailing(self, mailer_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("mailer", mailer_id), data=json.dumps(kwargs) - ) - logger.info(f"{resp.status_code}: {mailer_id}") + resp = self.conn.patch(self._base_endpoint('mailer', mailer_id), data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {mailer_id}') def rebuild_mailer(self, mailing_id): """ @@ -670,10 +574,8 @@ def rebuild_mailer(self, mailing_id): URI to poll for progress """ - return self._base_post( - endpoint="mailer/" + str(mailing_id) + "/rebuild", - exception_message="Could not rebuild mailer", - ) + return self._base_post(endpoint='mailer/' + str(mailing_id) + '/rebuild', + exception_message='Could not rebuild mailer') def queue_mailer(self, mailing_id): """ @@ -686,138 +588,8 @@ def queue_mailer(self, mailing_id): URI to poll for progress """ - return self._base_post( - endpoint="mailer/" + str(mailing_id) + "/queue", - exception_message="Could not queue mailer", - ) - - def paginated_get(self, object_type, limit=None, **kwargs): - """Get multiple objects of a given type. - - `Args:` - object_type: string - The type of object to search for. - limit: int - The number of objects to return. If omitted, all objects are returned. - **kwargs: - Optional arguments to pass to the client. A full list can be found - in the `ActionKit API Documentation `_. - - Additionally, expressions to filter the data can also be provided. For addition - info, visit `Django's docs on field lookups `_. - - .. code-block:: python - - ak.paginated_get(name__contains="FirstName") - `Returns:` - Parsons.Table - The objects data. - """ - # "The maximum number of objects returned per request is 100. Use paging - # to get more objects." 
- # (https://roboticdogs.actionkit.com/docs//manual/api/rest/overview.html#ordering) - # get only `limit` objects if it's below 100, otherwise get 100 at a time - kwargs["_limit"] = min(100, limit or 1_000_000_000) - json_data = self._base_get(object_type, params=kwargs) - data = json_data["objects"] - - next_url = json_data.get("meta", {}).get("next") - while next_url: - resp = self.conn.get(f"https://{self.domain}{next_url}") - data.extend(resp.json().get("objects", [])) - next_url = resp.json().get("meta", {}).get("next") - if limit and len(data) >= limit: - break - - return Table(data[:limit]) - - def paginated_get_custom_limit( - self, - object_type, - limit=None, - threshold_field=None, - threshold_value=None, - ascdesc="asc", - **kwargs, - ): - """Get multiple objects of a given type, stopping based on the value of a field. - - `Args:` - object_type: string - The type of object to search for. - limit: int - The maximum number of objects to return. Even if the threshold - value is not reached, if the limit is set, then at most this many - objects will be returned. - threshold_field: string - The field used to determine when to stop. - Must be one of the options for ordering by. - threshold_value: string - The value of the field to stop at. - ascdesc: string - If "asc" (the default), return all objects below the threshold value. - If "desc", return all objects above the threshold value. - **kwargs: - You can also add expressions to filter the data beyond the limit/threshold values - above. For additional info, visit `Django's docs on field lookups - `_. - - .. code-block:: python - - ak.paginated_get(name__contains="FirstName") - `Returns:` - Parsons.Table - The objects data. - """ - # "The maximum number of objects returned per request is 100. Use paging - # to get more objects." - # (https://roboticdogs.actionkit.com/docs//manual/api/rest/overview.html#ordering) - kwargs["_limit"] = min(100, limit or 1_000_000_000) - if ascdesc == "asc": - kwargs["order_by"] = threshold_field - else: - kwargs["order_by"] = "-" + threshold_field - json_data = self._base_get(object_type, params=kwargs) - data = json_data["objects"] - next_url = json_data.get("meta", {}).get("next") - while next_url: - last = data[-1].get(threshold_field) - if ascdesc == "asc" and last > threshold_value: - break - if ascdesc == "desc" and last < threshold_value: - break - resp = self.conn.get(f"https://{self.domain}{next_url}") - data += resp.json().get("objects", []) - next_url = resp.json().get("meta", {}).get("next") - if limit and len(data) >= limit: - break - # This could be more efficient but it's still O(n) so no big deal - i = len(data) - 1 # start at the end; 0-indexed means the end is length - 1 - if ascdesc == "asc": - while data[i].get(threshold_field) > threshold_value: - i = i - 1 - else: - while data[i].get(threshold_field) < threshold_value: - i = i - 1 - data = data[:i] - return Table(data[:limit]) - - def get_order(self, order_id): - """ - Get an order. - - `Args:` - order_id: int - The order id of the record to get. 
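# Editor's note: hypothetical call against paginated_get_custom_limit() as
# shown above -- fetch newest orders first and stop paging once rows cross
# the threshold value (the field name and cutoff below are assumptions).
recent_orders = ak.paginated_get_custom_limit(
    "order",
    limit=500,
    threshold_field="created_at",
    threshold_value="2023-01-01",
    ascdesc="desc",
)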
- `Returns`: - User json object - """ - - return self._base_get( - endpoint="order", entity_id=order_id, exception_message="Order not found" - ) + return self._base_post(endpoint='mailer/' + str(mailing_id) + '/queue', + exception_message='Could not queue mailer') def update_order(self, order_id, **kwargs): """ @@ -834,111 +606,9 @@ def update_order(self, order_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("order", order_id), data=json.dumps(kwargs) - ) - logger.info(f"{resp.status_code}: {order_id}") - - def get_orderrecurring(self, orderrecurring_id): - """ - Get an orderrecurring. - - `Args:` - orderrecurring_id: int - The orderrecurring id of the record to get. - `Returns`: - User json object - """ - - return self._base_get( - endpoint="orderrecurring", - entity_id=orderrecurring_id, - exception_message="Orderrecurring not found", - ) - - def cancel_orderrecurring(self, recurring_id): - """ - Cancel a recurring order. - - `Args:` - recurring_id: int - The id of the recurring order to update (NOT the order_id) - `Returns:` - ``None`` - """ - - resp = self.conn.post( - self._base_endpoint("orderrecurring", str(recurring_id) + "/cancel") - ) - logger.info(f"{resp.status_code}: {recurring_id}") - return resp - - def update_orderrecurring(self, orderrecurring_id, **kwargs): - """ - Update a recurring order. - - `Args:` - orderrecurring_id: int - The id of the orderrecurring to update - **kwargs: - Optional arguments and fields to pass to the client. A full list can be found - in the `ActionKit API Documentation `_. - `Returns:` - ``None`` - """ - - resp = self.conn.patch( - self._base_endpoint("orderrecurring", orderrecurring_id), - data=json.dumps(kwargs), - ) - logger.info(f"{resp.status_code}: {orderrecurring_id}") - - def get_orders(self, limit=None, **kwargs): - """Get multiple orders. - - `Args:` - limit: int - The number of orders to return. If omitted, all orders are returned. - **kwargs: - Optional arguments to pass to the client. A full list can be found - in the `ActionKit API Documentation `_. - - Additionally, expressions to filter the data can also be provided. For addition - info, visit `Django's docs on field lookups `_. - - .. code-block:: python - - ak.get_orders(import_id="my-import-123") - `Returns:` - Parsons.Table - The orders data. - """ - return self.paginated_get("order", limit=limit, **kwargs) - - def update_paymenttoken(self, paymenttoken_id, **kwargs): - """ - Update a saved payment token. - - `Args:` - paymenttoken_id: int - The id of the payment token to update - **kwargs: - Optional arguments and fields to pass to the client. A full list can be found - in the `ActionKit API Documentation `_. 
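# Editor's note: usage sketch for the recurring-order helpers removed above;
# the id and field below are placeholders (consult the ActionKit API docs for
# valid orderrecurring fields).
ak.cancel_orderrecurring(12345)                    # POST .../orderrecurring/12345/cancel
ak.update_orderrecurring(12345, amount="10.00")    # PATCH with a JSON body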
- `Returns:` - ``HTTP response`` - """ - - resp = self.conn.patch( - self._base_endpoint("paymenttoken", paymenttoken_id), - data=json.dumps(kwargs), - ) - logger.info(f"{resp.status_code}: {paymenttoken_id}") - return resp + resp = self.conn.patch(self._base_endpoint('order', order_id), + data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {order_id}') def get_page_followup(self, page_followup_id): """ @@ -951,11 +621,8 @@ def get_page_followup(self, page_followup_id): Page followup json object """ - return self._base_get( - endpoint="pagefollowup", - entity_id=page_followup_id, - exception_message="Page followup not found", - ) + return self._base_get(endpoint='pagefollowup', entity_id=page_followup_id, + exception_message='Page followup not found') def get_page_followup_fields(self): """ @@ -966,8 +633,8 @@ def get_page_followup_fields(self): List of page followup fields """ - resp = self._base_get(endpoint="pagefollowup/schema") - return list(resp["fields"].keys()) + resp = self._base_get(endpoint='pagefollowup/schema') + return list(resp['fields'].keys()) def create_page_followup(self, signup_page_id, url, **kwargs): """ @@ -986,13 +653,11 @@ def create_page_followup(self, signup_page_id, url, **kwargs): API location of new resource """ - return self._base_post( - endpoint="pagefollowup", - exception_message="Could not create page followup", - page=f"/rest/v1/eventsignuppage/{signup_page_id}/", - url=url, - **kwargs, - ) + return self._base_post(endpoint='pagefollowup', + exception_message='Could not create page followup', + page=f'/rest/v1/eventsignuppage/{signup_page_id}/', + url=url, + **kwargs) def get_survey_question(self, survey_question_id): """ @@ -1005,11 +670,8 @@ def get_survey_question(self, survey_question_id): Survey question json object """ - return self._base_get( - endpoint="surveyquestion", - entity_id=survey_question_id, - exception_message="Survey question not found", - ) + return self._base_get(endpoint='surveyquestion', entity_id=survey_question_id, + exception_message='Survey question not found') def update_survey_question(self, survey_question_id, **kwargs): """ @@ -1028,28 +690,9 @@ def update_survey_question(self, survey_question_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("surveyquestion", survey_question_id), - data=json.dumps(kwargs), - ) - logger.info(f"{resp.status_code}: {survey_question_id}") - - def create_transaction(self, **kwargs): - """ - Create a transaction. - - `Args:` - **kwargs: - Optional arguments and fields to pass to the client. - `Returns:` - Transaction json object - """ - - return self._base_post( - endpoint="transaction", - exception_message="Could not create transaction", - **kwargs, - ) + resp = self.conn.patch(self._base_endpoint('surveyquestion', survey_question_id), + data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {survey_question_id}') def update_transaction(self, transaction_id, **kwargs): """ @@ -1066,34 +709,9 @@ def update_transaction(self, transaction_id, **kwargs): ``None`` """ - resp = self.conn.patch( - self._base_endpoint("transaction", transaction_id), data=json.dumps(kwargs) - ) - logger.info(f"{resp.status_code}: {transaction_id}") - - def get_transactions(self, limit=None, **kwargs): - """Get multiple transactions. - - `Args:` - limit: int - The number of transactions to return. If omitted, all transactions are returned. - **kwargs: - Optional arguments to pass to the client. A full list can be found - in the `ActionKit API Documentation `_. 
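# Editor's note: the filter kwargs above are Django-style field lookups passed
# straight through to the REST API. Both examples come from docstrings in this
# diff; the values are placeholders.
ak.get_orders(import_id="my-import-123")           # exact-match filter
ak.get_transactions(order="order-1", limit=200)    # lookup plus capped result set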
- - Additionally, expressions to filter the data can also be provided. For addition - info, visit `Django's docs on field lookups `_. - - .. code-block:: python - - ak.get_transactions(order="order-1") - `Returns:` - Parsons.Table - The transactions data. - """ - return self.paginated_get("transaction", limit=limit, **kwargs) + resp = self.conn.patch(self._base_endpoint('transaction', transaction_id), + data=json.dumps(kwargs)) + logger.info(f'{resp.status_code}: {transaction_id}') def create_generic_action(self, page, email=None, ak_id=None, **kwargs): """ @@ -1113,27 +731,20 @@ def create_generic_action(self, page, email=None, ak_id=None, **kwargs): `Returns`: dict The response json - """ # noqa: E501,E261 + """ # noqa: E501,E261 if not email or ak_id: - raise ValueError("One of email or ak_id is required.") - - return self._base_post( - endpoint="action", - exception_message="Could not create action.", - email=email, - page=page, - return_full_json=True, - **kwargs, - ) - - def bulk_upload_csv( - self, - csv_file, - import_page, - autocreate_user_fields=False, - user_fields_only=False, - ): + raise ValueError('One of email or ak_id is required.') + + return self._base_post(endpoint='action', + exception_message='Could not create action.', + email=email, + page=page, + return_full_json=True, + **kwargs) + + def bulk_upload_csv(self, csv_file, import_page, + autocreate_user_fields=False, user_fields_only=False): """ Bulk upload a csv file of new users or user updates. If you are uploading a table object, use bulk_upload_table instead. @@ -1163,38 +774,32 @@ def bulk_upload_csv( success: whether upload was successful progress_url: an API URL to get progress on upload processing res: requests http response object - """ # noqa: E501,E261 + """ # noqa: E501,E261 # self.conn defaults to JSON, but this has to be form/multi-part.... - upload_client = self._conn({"accepts": "application/json"}) + upload_client = self._conn({'accepts': 'application/json'}) if isinstance(csv_file, str): - csv_file = open(csv_file, "rb") + csv_file = open(csv_file, 'rb') - url = self._base_endpoint("upload") - files = {"upload": csv_file} + url = self._base_endpoint('upload') + files = {'upload': csv_file} data = { - "page": import_page, - "autocreate_user_fields": int(autocreate_user_fields), - "user_fields_only": int(user_fields_only), + 'page': import_page, + 'autocreate_user_fields': int(autocreate_user_fields), + 'user_fields_only': int(user_fields_only), } with upload_client.post(url, files=files, data=data) as res: - progress_url = res.headers.get("Location") + progress_url = res.headers.get('Location') rv = { - "res": res, - "success": res.status_code == 201, - "id": progress_url.split("/")[-2] if progress_url else None, - "progress_url": progress_url, + 'res': res, + 'success': res.status_code == 201, + 'id': progress_url.split('/')[-2] if progress_url else None, + 'progress_url': progress_url } return rv - def bulk_upload_table( - self, - table, - import_page, - autocreate_user_fields=0, - no_overwrite_on_empty=False, - set_only_columns=None, - ): + def bulk_upload_table(self, table, import_page, autocreate_user_fields=0, + no_overwrite_on_empty=False, set_only_columns=None): """ Bulk upload a table of new users or user updates. See `ActionKit User Upload Documentation `_ @@ -1232,43 +837,35 @@ def bulk_upload_table( success: bool -- whether upload was successful (individual rows may not have been) results: [dict] -- This is a list of the full results. 
progress_url and res for any results - """ # noqa: E501,E261 + """ # noqa: E501,E261 - import_page = check_env.check("ACTION_KIT_IMPORTPAGE", import_page) + import_page = check_env.check('ACTION_KIT_IMPORTPAGE', import_page) upload_tables = self._split_tables_no_empties( - table, no_overwrite_on_empty, set_only_columns - ) + table, no_overwrite_on_empty, set_only_columns) results = [] for tbl in upload_tables: - user_fields_only = int( - not any( - [ - h - for h in tbl.columns - if h != "email" and not h.startswith("user_") - ] - ) - ) - results.append( - self.bulk_upload_csv( - tbl.to_csv(), - import_page, - autocreate_user_fields=autocreate_user_fields, - user_fields_only=user_fields_only, - ) - ) - return {"success": all([r["success"] for r in results]), "results": results} + user_fields_only = int(not any([ + h for h in tbl.columns + if h != 'email' and not h.startswith('user_')])) + results.append(self.bulk_upload_csv(tbl.to_csv(), + import_page, + autocreate_user_fields=autocreate_user_fields, + user_fields_only=user_fields_only)) + return { + 'success': all([r['success'] for r in results]), + 'results': results + } def _split_tables_no_empties(self, table, no_overwrite_on_empty, set_only_columns): table_groups = {} # uploading combo of user_id and email column should be mutually exclusive blank_columns_test = table.columns if not no_overwrite_on_empty: - blank_columns_test = set( - ["user_id", "email"] + (set_only_columns or []) - ).intersection(table.columns) + blank_columns_test = (set(['user_id', 'email'] + (set_only_columns or [])) + .intersection(table.columns)) for row in table: - blanks = tuple(k for k in blank_columns_test if row.get(k) in (None, "")) + blanks = tuple(k for k in blank_columns_test + if row.get(k) in (None, '')) grp = table_groups.setdefault(blanks, []) grp.append(row) results = [] @@ -1276,12 +873,12 @@ def _split_tables_no_empties(self, table, no_overwrite_on_empty, set_only_column subset_table = Table(subset) if blanks: subset_table.table = subset_table.table.cutout(*blanks) - logger.debug(f"Column Upload Blanks: {blanks}") - logger.debug(f"Column Upload Columns: {subset_table.columns}") - if not set(["user_id", "email"]).intersection(subset_table.columns): + logger.debug(f'Column Upload Blanks: {blanks}') + logger.debug(f'Column Upload Columns: {subset_table.columns}') + if not set(['user_id', 'email']).intersection(subset_table.columns): logger.warning( - f"Upload will fail without user_id or email. " - f"Rows: {subset_table.num_rows}, Columns: {subset_table.columns}" + f'Upload will fail without user_id or email. 
' + f'Rows: {subset_table.num_rows}, Columns: {subset_table.columns}' ) results.append(subset_table) return results @@ -1304,17 +901,15 @@ def collect_upload_errors(self, result_array): """ errors = [] for res in result_array: - upload_id = res.get("id") + upload_id = res.get('id') if upload_id: while True: - upload = self._base_get(endpoint="upload", entity_id=upload_id) - if not upload or upload.get("status") != "new": + upload = self._base_get(endpoint='upload', entity_id=upload_id) + if not upload or upload.get('status') != 'new': break else: time.sleep(1) - error_data = self._base_get( - endpoint="uploaderror", params={"upload": upload_id} - ) - logger.debug(f"error collect result: {error_data}") - errors.extend(error_data.get("objects") or []) + error_data = self._base_get(endpoint='uploaderror', params={'upload': upload_id}) + logger.debug(f'error collect result: {error_data}') + errors.extend(error_data.get('objects') or []) return errors diff --git a/parsons/action_network/__init__.py b/parsons/action_network/__init__.py index c13da95c5d..39fa909015 100644 --- a/parsons/action_network/__init__.py +++ b/parsons/action_network/__init__.py @@ -1,3 +1,5 @@ from parsons.action_network.action_network import ActionNetwork -__all__ = ["ActionNetwork"] +__all__ = [ + 'ActionNetwork' +] diff --git a/parsons/action_network/action_network.py b/parsons/action_network/action_network.py index 9c2e499e9e..387f092552 100644 --- a/parsons/action_network/action_network.py +++ b/parsons/action_network/action_network.py @@ -1,15 +1,13 @@ import json -import logging -import re -import warnings - from parsons import Table +import re from parsons.utilities import check_env from parsons.utilities.api_connector import APIConnector +import logging logger = logging.getLogger(__name__) -API_URL = "https://actionnetwork.org/api/v2" +API_URL = 'https://actionnetwork.org/api/v2' class ActionNetwork(object): @@ -18,40 +16,33 @@ class ActionNetwork(object): api_token: str The OSDI API token """ - def __init__(self, api_token=None): - self.api_token = check_env.check("AN_API_TOKEN", api_token) + self.api_token = check_env.check('AN_API_TOKEN', api_token) self.headers = { "Content-Type": "application/json", - "OSDI-API-Token": self.api_token, + "OSDI-API-Token": self.api_token } self.api_url = API_URL self.api = APIConnector(self.api_url, headers=self.headers) - def _get_page(self, object_name, page, per_page=25, filter=None): + def _get_page(self, object_name, page, per_page=25): # returns data from one page of results if per_page > 25: per_page = 25 - logger.info( - "Action Network's API will not return more than 25 entries per page. \ - Changing per_page parameter to 25." - ) - params = {"page": page, "per_page": per_page, "filter": filter} - return self.api.get_request(url=object_name, params=params) + logger.info("Action Network's API will not return more than 25 entries per page. \ + Changing per_page parameter to 25.") + page_url = f"{object_name}?page={page}&per_page={per_page}" + return self.api.get_request(url=page_url) - def _get_entry_list(self, object_name, limit=None, per_page=25, filter=None): + def _get_entry_list(self, object_name, limit=None, per_page=25): # returns a list of entries for a given object, such as people, tags, or actions - # Filter can only be applied to people, petitions, events, forms, fundraising_pages, - # event_campaigns, campaigns, advocacy_campaigns, signatures, attendances, submissions, - # donations and outreaches. 
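# Editor's note: sketch of the OData-style filter pass-through removed above,
# using the expression from the docstring of get_people() (shown just below);
# the token is a placeholder.
an = ActionNetwork(api_token="...")
recently_modified = an.get_people(filter="modified_date gt '2014-03-25'")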
- # See Action Network API docs for more info: https://actionnetwork.org/docs/v2/ count = 0 page = 1 return_list = [] while True: - response = self._get_page(object_name, page, per_page, filter=filter) + response = self._get_page(object_name, page, per_page) page = page + 1 - response_list = response["_embedded"][f"osdi:{object_name}"] + response_list = response['_embedded'][f"osdi:{object_name}"] if not response_list: return Table(return_list) return_list.extend(response_list) @@ -60,7 +51,7 @@ def _get_entry_list(self, object_name, limit=None, per_page=25, filter=None): if count >= limit: return Table(return_list[0:limit]) - def get_people(self, limit=None, per_page=25, page=None, filter=None): + def get_people(self, limit=None, per_page=25, page=None): """ `Args:` limit: @@ -69,15 +60,12 @@ def get_people(self, limit=None, per_page=25, page=None, filter=None): The number of entries per page to return. 25 maximum. page Which page of results to return - filter - The OData query for filtering results. E.g. "modified_date gt '2014-03-25'". - When None, no filter is applied. `Returns:` A list of JSONs of people stored in Action Network. """ if page: - return self._get_page("people", page, per_page, filter=filter) - return self._get_entry_list("people", limit, per_page, filter=filter) + self._get_page("people", page, per_page) + return self._get_entry_list("people", limit, per_page) def get_person(self, person_id): """ @@ -90,29 +78,10 @@ def get_person(self, person_id): """ return self.api.get_request(url=f"people/{person_id}") - def upsert_person( - self, - email_address=None, - given_name=None, - family_name=None, - tags=None, - languages_spoken=None, - postal_addresses=None, - mobile_number=None, - mobile_status="subscribed", - background_processing=False, - **kwargs, - ): + def add_person(self, email_address=None, given_name=None, family_name=None, tags=None, + languages_spoken=None, postal_addresses=None, mobile_number=None, + mobile_status='subscribed', **kwargs): """ - Creates or updates a person record. In order to update an existing record instead of - creating a new one, you must supply an email or mobile number which matches a record - in the database. - - Identifiers are intentionally not included as an option on - this method, because their use can cause buggy behavior if - they are not globally unique. ActionNetwork support strongly - encourages developers not to use custom identifiers. - `Args:` email_address: Either email_address or mobile_number are required. Can be any of the following @@ -133,7 +102,7 @@ def upsert_person( family_name: The person's family name tags: - Optional field. A list of strings of pre-existing tags to be applied to the person. + Any tags to be applied to the person languages_spoken: Optional field. A list of strings of the languages spoken by the person postal_addresses: @@ -154,58 +123,46 @@ def upsert_person( - "unsubscribed" mobile_status: 'subscribed' or 'unsubscribed' - background_request: bool - If set `true`, utilize ActionNetwork's "background processing". This will return - an immediate success, with an empty JSON body, and send your request to the - background queue for eventual processing. - https://actionnetwork.org/docs/v2/#background-processing **kwargs: Any additional fields to store about the person. Action Network allows any custom field. 
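# Editor's note: minimal upsert_person() sketch; all field values are
# placeholders. A matching email or mobile number updates the existing
# record, otherwise a new one is created.
an.upsert_person(
    email_address="person@example.org",
    given_name="Pat",
    family_name="Example",
    tags=["volunteer"],            # pre-existing tag names only
    mobile_number="5551234567",    # non-digits are stripped before upload
    mobile_status="subscribed",
)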
Adds a person to Action Network """ email_addresses_field = None - if isinstance(email_address, str): + if type(email_address) == str: email_addresses_field = [{"address": email_address}] - elif isinstance(email_address, list): - if isinstance(email_address[0], str): + elif type(email_address) == list: + if type(email_address[0]) == str: email_addresses_field = [{"address": email} for email in email_address] - email_addresses_field[0]["primary"] = True - if isinstance(email_address[0], dict): + email_addresses_field[0]['primary'] = True + if type(email_address[0]) == dict: email_addresses_field = email_address mobile_numbers_field = None - if isinstance(mobile_number, str): - mobile_numbers_field = [ - {"number": re.sub("[^0-9]", "", mobile_number), "status": mobile_status} - ] - elif isinstance(mobile_number, int): - mobile_numbers_field = [ - {"number": str(mobile_number), "status": mobile_status} - ] - elif isinstance(mobile_number, list): + if type(mobile_number) == str: + mobile_numbers_field = [{"number": re.sub('[^0-9]', "", mobile_number), + "status": mobile_status}] + elif type(mobile_number) == int: + mobile_numbers_field = [{"number": str(mobile_number), "status": mobile_status}] + elif type(mobile_number) == list: if len(mobile_number) > 1: - raise ("Action Network allows only 1 phone number per activist") - if isinstance(mobile_number[0], list): - mobile_numbers_field = [ - {"number": re.sub("[^0-9]", "", cell), "status": mobile_status} - for cell in mobile_number - ] - mobile_numbers_field[0]["primary"] = True - if isinstance(mobile_number[0], int): - mobile_numbers_field = [ - {"number": cell, "status": mobile_status} for cell in mobile_number - ] - mobile_numbers_field[0]["primary"] = True - if isinstance(mobile_number[0], dict): + raise('Action Network allows only 1 phone number per activist') + if type(mobile_number[0]) == str: + mobile_numbers_field = [{"number": re.sub('[^0-9]', "", cell), + "status": mobile_status} + for cell in mobile_number] + mobile_numbers_field[0]['primary'] = True + if type(mobile_number[0]) == int: + mobile_numbers_field = [{"number": cell, "status": mobile_status} + for cell in mobile_number] + mobile_numbers_field[0]['primary'] = True + if type(mobile_number[0]) == dict: mobile_numbers_field = mobile_number if not email_addresses_field and not mobile_numbers_field: - raise ( - "Either email_address or mobile_number is required and can be formatted " - "as a string, list of strings, a dictionary, a list of dictionaries, or " - "(for mobile_number only) an integer or list of integers" - ) + raise("Either email_address or mobile_number is required and can be formatted " + "as a string, list of strings, a dictionary, a list of dictionaries, or " + "(for mobile_number only) an integer or list of integers") data = {"person": {}} @@ -220,76 +177,22 @@ def upsert_person( if languages_spoken is not None: data["person"]["languages_spoken"] = languages_spoken if postal_addresses is not None: - data["person"]["postal_addresses"] = postal_addresses + data["person"]["postal_address"] = postal_addresses if tags is not None: data["add_tags"] = tags - data["person"]["custom_fields"] = {**kwargs} - url = f"{self.api_url}/people" - if background_processing: - url = f"{url}?background_processing=true" - response = self.api.post_request(url, data=json.dumps(data)) - - identifiers = response["identifiers"] - person_id = [ - entry_id.split(":")[1] - for entry_id in identifiers - if "action_network:" in entry_id - ] - if not person_id: - logger.error(f"Response gave 
no valid person_id: {identifiers}") - else: - person_id = person_id[0] - if response["created_date"] == response["modified_date"]: - logger.info(f"Entry {person_id} successfully added.") - else: - logger.info(f"Entry {person_id} successfully updated.") + response = self.api.post_request(url=f"{self.api_url}/people", data=json.dumps(data)) + identifiers = response['identifiers'] + person_id = [entry_id.split(':')[1] + for entry_id in identifiers if 'action_network:' in entry_id][0] + logger.info(f"Entry {person_id} successfully added to people.") return response - def add_person( - self, - email_address=None, - given_name=None, - family_name=None, - tags=None, - languages_spoken=None, - postal_addresses=None, - mobile_number=None, - mobile_status="subscribed", - **kwargs, - ): - """ - Creates a person in the database. WARNING: this endpoint has been deprecated in favor of - upsert_person. + def update_person(self, entry_id, **kwargs): """ - logger.warning( - "Method 'add_person' has been deprecated. Please use 'upsert_person'." - ) - # Pass inputs to preferred method: - self.upsert_person( - email_address=email_address, - given_name=given_name, - family_name=family_name, - languages_spoken=languages_spoken, - postal_addresses=postal_addresses, - mobile_number=mobile_number, - mobile_status=mobile_status, - **kwargs, - ) - - def update_person(self, entry_id, background_processing=False, **kwargs): - """ - Updates a person's data in Action Network, given their Action Network ID. Note that you - can't alter a person's tags with this method. Instead, use upsert_person. - `Args:` entry_id: The person's Action Network id - background_processing: bool - If set `true`, utilize ActionNetwork's "background processing". This will return - an immediate success, with an empty JSON body, and send your request to the - background queue for eventual processing. - https://actionnetwork.org/docs/v2/#background-processing **kwargs: Fields to be updated. The possible fields are email_address: @@ -310,6 +213,8 @@ def update_person(self, entry_id, background_processing=False, **kwargs): The person's given name family_name: The person's family name + tags: + Any tags to be applied to the person languages_spoken: Optional field. A list of strings of the languages spoken by the person postal_addresses: @@ -318,36 +223,29 @@ def update_person(self, entry_id, background_processing=False, **kwargs): https://actionnetwork.org/docs/v2/people#put custom_fields: A dictionary of any other fields to store about the person. + Updates a person's data in Action Network """ data = {**kwargs} - url = f"{self.api_url}/people/{entry_id}" - if background_processing: - url = f"{url}?background_processing=true" - response = self.api.put_request( - url=url, - data=json.dumps(data), - success_codes=[204, 201, 200], - ) + response = self.api.put_request(url=f"{self.api_url}/people/{entry_id}", + json=json.dumps(data), success_codes=[204, 201, 200]) logger.info(f"Person {entry_id} successfully updated") return response - def get_tags(self, limit=None, per_page=None): + def get_tags(self, limit=None, per_page=25, page=None): """ `Args:` limit: The number of entries to return. When None, returns all entries. - per_page: - This is a deprecated argument. + per_page + The number of entries per page to return. 25 maximum. + page + Which page of results to return `Returns:` A list of JSONs of tags in Action Network. 
""" - if per_page: - warnings.warn( - "per_page is a deprecated argument on get_tags()", - DeprecationWarning, - stacklevel=2, - ) - return self._get_entry_list("tags", limit) + if page: + self.get_page("tags", page, per_page) + return self._get_entry_list("tags", limit, per_page) def get_tag(self, tag_id): """ @@ -367,16 +265,13 @@ def add_tag(self, name): The tag's name. This is the ONLY editable field Adds a tag to Action Network. Once created, tags CANNOT be edited or deleted. """ - data = {"name": name} - response = self.api.post_request( - url=f"{self.api_url}/tags", data=json.dumps(data) - ) - identifiers = response["identifiers"] - person_id = [ - entry_id.split(":")[1] - for entry_id in identifiers - if "action_network:" in entry_id - ][0] + data = { + "name": name + } + response = self.api.post_request(url=f"{self.api_url}/tags", data=json.dumps(data)) + identifiers = response['identifiers'] + person_id = [entry_id.split(':')[1] + for entry_id in identifiers if 'action_network:' in entry_id][0] logger.info(f"Tag {person_id} successfully added to tags.") return response @@ -410,7 +305,9 @@ def create_event(self, title, start_date=None, location=None): Dict of Action Network Event data. """ - data = {"title": title} + data = { + "title": title + } if start_date: start_date = str(start_date) @@ -419,11 +316,9 @@ def create_event(self, title, start_date=None, location=None): if isinstance(location, dict): data["location"] = location - event_dict = self.api.post_request( - url=f"{self.api_url}/events", data=json.dumps(data) - ) + event_dict = self.api.post_request(url=f"{self.api_url}/events", data=json.dumps(data)) - an_event_id = event_dict["_links"]["self"]["href"].split("/")[-1] + an_event_id = event_dict["_links"]["self"]["href"].split('/')[-1] event_dict["event_id"] = an_event_id return event_dict diff --git a/parsons/airtable/__init__.py b/parsons/airtable/__init__.py index 2f60d0744b..9c9fb130bf 100644 --- a/parsons/airtable/__init__.py +++ b/parsons/airtable/__init__.py @@ -1,3 +1,5 @@ from parsons.airtable.airtable import Airtable -__all__ = ["Airtable"] +__all__ = [ + 'Airtable' +] diff --git a/parsons/airtable/airtable.py b/parsons/airtable/airtable.py index 3465d4cd26..4368c587eb 100644 --- a/parsons/airtable/airtable.py +++ b/parsons/airtable/airtable.py @@ -21,7 +21,7 @@ class Airtable(object): def __init__(self, base_key, table_name, api_key=None): - self.api_key = check_env.check("AIRTABLE_API_KEY", api_key) + self.api_key = check_env.check('AIRTABLE_API_KEY', api_key) self.client = client(base_key, table_name, self.api_key) def get_record(self, record_id): @@ -37,15 +37,8 @@ def get_record(self, record_id): return self.client.get(record_id) - def get_records( - self, - fields=None, - max_records=None, - view=None, - formula=None, - sort=None, - sample_size=None, - ): + def get_records(self, fields=None, max_records=None, view=None, + formula=None, sort=None, sample_size=None): """ `Args:` fields: str or lst @@ -95,30 +88,25 @@ def get_records( """ # Raises an error if sort is None type. Thus, only adding if populated. - kwargs = { - "fields": fields, - "max_records": max_records, - "view": view, - "formula": formula, - } + kwargs = {'fields': fields, 'max_records': max_records, 'view': view, 'formula': formula} if sort: - kwargs["sort"] = sort + kwargs['sort'] = sort tbl = Table(self.client.get_all(**kwargs)) # If the results are empty, then return an empty table. 
- if "fields" not in tbl.columns: + if 'fields' not in tbl.columns: return Table([[]]) unpack_dicts_kwargs = { - "column": "fields", - "prepend": False, + 'column': 'fields', + 'prepend': False, } if fields: - unpack_dicts_kwargs["keys"] = fields + unpack_dicts_kwargs['keys'] = fields if sample_size: - unpack_dicts_kwargs["sample_size"] = sample_size + unpack_dicts_kwargs['sample_size'] = sample_size return tbl.unpack_dict(**unpack_dicts_kwargs) @@ -137,7 +125,7 @@ def insert_record(self, row): """ resp = self.client.insert(row) - logger.info("Record inserted") + logger.info('Record inserted') return resp def insert_records(self, table, typecast=False): @@ -156,7 +144,7 @@ def insert_records(self, table, typecast=False): """ resp = self.client.batch_insert(table, typecast=typecast) - logger.info(f"{table.num_rows} records inserted.") + logger.info(f'{table.num_rows} records inserted.') return resp def update_record(self, record_id, fields, typecast=False): @@ -176,5 +164,5 @@ def update_record(self, record_id, fields, typecast=False): """ resp = self.client.update(record_id, fields, typecast=typecast) - logger.info(f"{record_id} updated") + logger.info(f'{record_id} updated') return resp diff --git a/parsons/alchemer/__init__.py b/parsons/alchemer/__init__.py index 87d961dd4d..efb586de3a 100644 --- a/parsons/alchemer/__init__.py +++ b/parsons/alchemer/__init__.py @@ -1,3 +1,3 @@ from parsons.alchemer.alchemer import Alchemer, SurveyGizmo -__all__ = ["SurveyGizmo", "Alchemer"] +__all__ = ['SurveyGizmo', 'Alchemer'] diff --git a/parsons/alchemer/alchemer.py b/parsons/alchemer/alchemer.py index cf81c1fb4b..bc8cf1f0eb 100644 --- a/parsons/alchemer/alchemer.py +++ b/parsons/alchemer/alchemer.py @@ -10,17 +10,14 @@ def sg_compatibility(): # Create backwards compatibility with SurveyGizmo class import os + if os.getenv('SURVEYGIZMO_API_TOKEN'): + os.environ['ALCHEMER_API_TOKEN'] = os.getenv('SURVEYGIZMO_API_TOKEN') - if os.getenv("SURVEYGIZMO_API_TOKEN"): - os.environ["ALCHEMER_API_TOKEN"] = os.getenv("SURVEYGIZMO_API_TOKEN") + if os.getenv('SURVEYGIZMO_API_TOKEN_SECRET'): + os.environ['ALCHEMER_API_TOKEN_SECRET'] = os.getenv('SURVEYGIZMO_API_TOKEN_SECRET') - if os.getenv("SURVEYGIZMO_API_TOKEN_SECRET"): - os.environ["ALCHEMER_API_TOKEN_SECRET"] = os.getenv( - "SURVEYGIZMO_API_TOKEN_SECRET" - ) - - if os.getenv("SURVEYGIZMO_API_VERSION"): - os.environ["ALCHEMER_API_VERSION"] = os.getenv("SURVEYGIZMO_API_VERSION") + if os.getenv('SURVEYGIZMO_API_VERSION'): + os.environ['ALCHEMER_API_VERSION'] = os.getenv('SURVEYGIZMO_API_VERSION') class Alchemer(object): @@ -45,21 +42,19 @@ class Alchemer(object): Alchemer Class """ - def __init__(self, api_token=None, api_token_secret=None, api_version="v5"): + def __init__(self, api_token=None, api_token_secret=None, api_version='v5'): sg_compatibility() - self.api_token = check_env.check("ALCHEMER_API_TOKEN", api_token) - self.api_token_secret = check_env.check( - "ALCHEMER_API_TOKEN_SECRET", api_token_secret - ) - self.api_version = check_env.check("ALCHEMER_API_VERSION", api_version) + self.api_token = check_env.check('ALCHEMER_API_TOKEN', api_token) + self.api_token_secret = check_env.check('ALCHEMER_API_TOKEN_SECRET', api_token_secret) + self.api_version = check_env.check('ALCHEMER_API_VERSION', api_version) self._client = surveygizmo.SurveyGizmo( - api_version=self.api_version, - api_token=self.api_token, - api_token_secret=self.api_token_secret, - ) + api_version=self.api_version, + api_token=self.api_token, + api_token_secret=self.api_token_secret + ) 
def get_surveys(self, page=None): """ @@ -75,15 +70,15 @@ def get_surveys(self, page=None): """ r = self._client.api.survey.list(page) - data = r["data"] + data = r['data'] if not page: - while r["page"] < r["total_pages"]: - r = self._client.api.survey.list(page=(r["page"] + 1)) - data.extend(r["data"]) + while r['page'] < r['total_pages']: + r = self._client.api.survey.list(page=(r['page']+1)) + data.extend(r['data']) - tbl = Table(data).remove_column("links") - tbl.unpack_dict("statistics", prepend=False) + tbl = Table(data).remove_column('links') + tbl.unpack_dict('statistics', prepend=False) logger.info(f"Found {tbl.num_rows} surveys.") @@ -107,16 +102,14 @@ def get_survey_responses(self, survey_id, page=None): r = self._client.api.surveyresponse.list(survey_id, page) logger.info(f"{survey_id}: {r['total_count']} responses.") - data = r["data"] + data = r['data'] if not page: - while r["page"] < r["total_pages"]: - r = self._client.api.surveyresponse.list( - survey_id, page=(r["page"] + 1) - ) - data.extend(r["data"]) + while r['page'] < r['total_pages']: + r = self._client.api.surveyresponse.list(survey_id, page=(r['page']+1)) + data.extend(r['data']) - tbl = Table(data).add_column("survey_id", survey_id, index=1) + tbl = Table(data).add_column('survey_id', survey_id, index=1) logger.info(f"Found #{tbl.num_rows} responses.") diff --git a/parsons/auth0/__init__.py b/parsons/auth0/__init__.py deleted file mode 100644 index b1e0ef7273..0000000000 --- a/parsons/auth0/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from parsons.auth0.auth0 import Auth0 - -__all__ = ["Auth0"] diff --git a/parsons/auth0/auth0.py b/parsons/auth0/auth0.py deleted file mode 100644 index 592686c3e2..0000000000 --- a/parsons/auth0/auth0.py +++ /dev/null @@ -1,129 +0,0 @@ -import json - -import requests -from parsons.etl.table import Table -from parsons.utilities import check_env - - -class Auth0(object): - """ - Instantiate the Auth0 class - - `Args:` - client_id: str - The Auth0 client ID. Not required if ``AUTH0_CLIENT_ID`` env variable set. - client_secret: str - The Auth0 client secret. Not required if ``AUTH0_CLIENT_SECRET`` env variable set. - domain: str - The Auth0 domain. Not required if ``AUTH0_DOMAIN`` env variable set. - `Returns:` - Auth0 Class - """ - - def __init__(self, client_id=None, client_secret=None, domain=None): - self.base_url = f"https://{check_env.check('AUTH0_DOMAIN', domain)}" - access_token = ( - requests.post( - f"{self.base_url}/oauth/token", - data={ - "grant_type": "client_credentials", # OAuth 2.0 flow to use - "client_id": check_env.check("AUTH0_CLIENT_ID", client_id), - "client_secret": check_env.check( - "AUTH0_CLIENT_SECRET", client_secret - ), - "audience": f"{self.base_url}/api/v2/", - }, - ) - .json() - .get("access_token") - ) - self.headers = { - "Authorization": f"Bearer {access_token}", - "Content-Type": "application/json", - } - - def delete_user(self, id): - """ - Delete Auth0 user. - - `Args:` - id: str - The user ID of the record to delete. - `Returns:` - int - """ - return requests.delete( - f"{self.base_url}/api/v2/users/{id}", headers=self.headers - ).status_code - - def get_users_by_email(self, email): - """ - Get Auth0 users by email. - - `Args:` - email: str - The user email of the record to get. 
- `Returns:` - Table Class - """ - url = f"{self.base_url}/api/v2/users-by-email" - val = requests.get(url, headers=self.headers, params={"email": email}) - if val.status_code == 429: - raise requests.exceptions.ConnectionError(val.json()["message"]) - return Table(val.json()) - - def upsert_user( - self, - email, - username=None, - given_name=None, - family_name=None, - app_metadata={}, - user_metadata={}, - ): - """ - Upsert Auth0 users by email. - - `Args:` - email: str - The user email of the record to get. - username: optional str - Username to set for user - given_name: optional str - Given to set for user - family_name: optional str - Family name to set for user - app_metadata: optional dict - App metadata to set for user - user_metadata: optional dict - User metadata to set for user - `Returns:` - Requests Response object - """ - payload = json.dumps( - { - "email": email.lower(), - "given_name": given_name, - "family_name": family_name, - "username": username, - "connection": "Username-Password-Authentication", - "app_metadata": app_metadata, - "blocked": False, - "user_metadata": user_metadata, - } - ) - existing = self.get_users_by_email(email.lower()) - if existing.num_rows > 0: - a0id = existing[0]["user_id"] - ret = requests.patch( - f"{self.base_url}/api/v2/users/{a0id}", - headers=self.headers, - data=payload, - ) - else: - ret = requests.post( - f"{self.base_url}/api/v2/users", headers=self.headers, data=payload - ) - if ret.status_code != 200: - raise ValueError(f"Invalid response {ret.json()}") - return ret diff --git a/parsons/aws/__init__.py b/parsons/aws/__init__.py index b0db4e9b5b..d71d4df09e 100644 --- a/parsons/aws/__init__.py +++ b/parsons/aws/__init__.py @@ -2,4 +2,8 @@ from parsons.aws.lambda_distribute import distribute_task from parsons.aws.aws_async import event_command -__all__ = ["S3", "distribute_task", "event_command"] +__all__ = [ + 'S3', + 'distribute_task', + 'event_command' +] diff --git a/parsons/aws/aws_async.py b/parsons/aws/aws_async.py index 68f1419420..412b6f6e45 100644 --- a/parsons/aws/aws_async.py +++ b/parsons/aws/aws_async.py @@ -34,96 +34,75 @@ def event_command(event, context): If you have deployed your app with `Zappa `_, then you do NOT need to add this shim. 
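# Editor's note: sketch of wiring the shim into a Lambda handler (the handler
# name and fall-through logic are assumptions). event_command() returns False
# when the event is not a dispatched task, so normal processing continues.
from parsons.aws import event_command

def handler(event, context):
    if event_command(event, context):
        return                      # a dispatched task ran; nothing else to do
    ...                             # otherwise, the handler's usual work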
""" - if not set(event).intersection({"task_path", "args", "kwargs"}): + if not set(event).intersection({'task_path', 'args', 'kwargs'}): return False # did not match an event command - func = import_and_get_task(event["task_path"], event.get("func_class_init_kwargs")) + func = import_and_get_task(event['task_path'], event.get('func_class_init_kwargs')) # if the func was decorated with zappa.async.task then run the real function - func = getattr(func, "sync", func) + func = getattr(func, 'sync', func) # DID match an event command # -- so probably don't do the usual thing the Lambda handler does - return func(*event["args"], **event["kwargs"]) or True - - -def run( - func, - args=[], - kwargs={}, - service="lambda", - capture_response=False, - remote_aws_lambda_function_name=None, - remote_aws_region=None, - func_class=None, - func_class_init_kwargs=None, - **task_kwargs, -): - lambda_function_name = remote_aws_lambda_function_name or os.environ.get( - "AWS_LAMBDA_FUNCTION_NAME" - ) - if not lambda_function_name or lambda_function_name == "FORCE_LOCAL": + return (func(*event['args'], **event['kwargs']) + or True) + + +def run(func, args=[], kwargs={}, service='lambda', capture_response=False, + remote_aws_lambda_function_name=None, remote_aws_region=None, + func_class=None, func_class_init_kwargs=None, **task_kwargs): + lambda_function_name = (remote_aws_lambda_function_name + or os.environ.get('AWS_LAMBDA_FUNCTION_NAME')) + if not lambda_function_name or lambda_function_name == 'FORCE_LOCAL': # We are neither running in Lambda environment, nor given one to invoke # so let's run it synchronously -- so code can be compatible both in-and-out of Lambda func(*args, **kwargs) return True # zappa has more robust and allows more configs -- but is not compatible with func_class if zappa_run and not func_class: - return zappa_run( - func, - args, - kwargs, - service, - capture_response, - remote_aws_lambda_function_name, - remote_aws_region, - **task_kwargs, - ) + return zappa_run(func, args, kwargs, service, + capture_response, remote_aws_lambda_function_name, remote_aws_region, + **task_kwargs) task_path = get_func_task_path(func, func_class) - payload = json.dumps( - { - "task_path": task_path, - "args": args, - "kwargs": kwargs, - "func_class_init_kwargs": func_class_init_kwargs, - } - ).encode("utf-8") + payload = (json.dumps({'task_path': task_path, + 'args': args, + 'kwargs': kwargs, + 'func_class_init_kwargs': func_class_init_kwargs}) + .encode('utf-8')) if len(payload) > 128000: # pragma: no cover raise AsyncException("Payload too large for async Lambda call") - lambda_client = boto3.Session().client("lambda") + lambda_client = boto3.Session().client('lambda') response = lambda_client.invoke( FunctionName=lambda_function_name, - InvocationType="Event", # makes the call async - Payload=payload, + InvocationType='Event', # makes the call async + Payload=payload ) - return response.get("StatusCode", 0) == 202 + return response.get('StatusCode', 0) == 202 ## # Utility Functions ## - def import_and_get_task(task_path, instance_init_kwargs=None): """ Given a modular path to a function, import that module and return the function. 
""" - module, function = task_path.rsplit(".", 1) + module, function = task_path.rsplit('.', 1) app_module = importlib.import_module(module) - class_func = function.split("|") + class_func = function.split('|') app_function = getattr(app_module, class_func[0]) if len(class_func) == 1: return app_function def init_and_run(*args, **kwargs): - print("INITRUN", args, kwargs) + print('INITRUN', args, kwargs) if len(class_func) == 3: # instance instance = app_function # actually the class else: instance = app_function(**(instance_init_kwargs or {})) method = getattr(instance, class_func[1]) return method(*args, **kwargs) - return init_and_run @@ -139,11 +118,11 @@ def get_func_task_path(func, method_class=None): # Then we record that info with |'s to be decoded in import_and_get_task # classmethod format: "Foo|method|" # instance method format: "Foo|method" - task_path = "{}.{}{}{}".format( + task_path = '{}.{}{}{}'.format( module_path, - f"{method_class.__name__}|" if method_class else "", + f'{method_class.__name__}|' if method_class else '', func_name, - "|" if method_class and "of date_modified_after - ): + if date_modified_after and not key['LastModified'] > date_modified_after: continue # Convert date to iso string - key["LastModified"] = key["LastModified"].isoformat() + key['LastModified'] = key['LastModified'].isoformat() # Add to output dict - keys_dict[key.get("Key")] = key + keys_dict[key.get('Key')] = key # If more than 1000 results, continue with token - if resp.get("NextContinuationToken"): - continuation_token = resp["NextContinuationToken"] - + if resp.get('NextContinuationToken'): + continuation_token = resp['NextContinuationToken'] else: break - logger.debug(f"Retrieved {len(keys_dict)} keys") - + logger.debug(f'Retrieved {len(keys_dict)} keys') return keys_dict def key_exists(self, bucket, key): @@ -240,10 +194,10 @@ def key_exists(self, bucket, key): key_count = len(self.list_keys(bucket, prefix=key)) if key_count > 0: - logger.debug(f"Found {key} in {bucket}.") + logger.debug(f'Found {key} in {bucket}.') return True else: - logger.debug(f"Did not find {key} in {bucket}.") + logger.debug(f'Did not find {key} in {bucket}.') return False def create_bucket(self, bucket): @@ -274,9 +228,7 @@ def create_bucket(self, bucket): self.client.create_bucket(Bucket=bucket) - def put_file( - self, bucket, key, local_path, acl="bucket-owner-full-control", **kwargs - ): + def put_file(self, bucket, key, local_path, acl='bucket-owner-full-control', **kwargs): """ Uploads an object to an S3 bucket @@ -295,9 +247,7 @@ def put_file( info. 
""" - self.client.upload_file( - local_path, bucket, key, ExtraArgs={"ACL": acl, **kwargs} - ) + self.client.upload_file(local_path, bucket, key, ExtraArgs={'ACL': acl, **kwargs}) def remove_file(self, bucket, key): """ @@ -360,26 +310,15 @@ def get_url(self, bucket, key, expires_in=3600): A link to download the object """ - return self.client.generate_presigned_url( - ClientMethod="get_object", - Params={"Bucket": bucket, "Key": key}, - ExpiresIn=expires_in, - ) - - def transfer_bucket( - self, - origin_bucket, - origin_key, - destination_bucket, - destination_key=None, - suffix=None, - regex=None, - date_modified_before=None, - date_modified_after=None, - public_read=False, - remove_original=False, - **kwargs, - ): + return self.client.generate_presigned_url(ClientMethod='get_object', + Params={'Bucket': bucket, + 'Key': key}, + ExpiresIn=expires_in) + + def transfer_bucket(self, origin_bucket, origin_key, destination_bucket, + destination_key=None, suffix=None, regex=None, + date_modified_before=None, date_modified_after=None, + public_read=False, remove_original=False, **kwargs): """ Transfer files between s3 buckets @@ -414,22 +353,22 @@ def transfer_bucket( """ # If prefix, get all files for the prefix - if origin_key.endswith("/"): + if origin_key.endswith('/'): resp = self.list_keys( origin_bucket, prefix=origin_key, suffix=suffix, regex=regex, date_modified_before=date_modified_before, - date_modified_after=date_modified_after, + date_modified_after=date_modified_after ) - key_list = [value["Key"] for value in resp.values()] + key_list = [value['Key'] for value in resp.values()] else: key_list = [origin_key] for key in key_list: # If destination_key is prefix, replace - if destination_key and destination_key.endswith("/"): + if destination_key and destination_key.endswith('/'): dest_key = key.replace(origin_key, destination_key) # If single destination, use destination key @@ -440,18 +379,16 @@ def transfer_bucket( else: dest_key = key - copy_source = {"Bucket": origin_bucket, "Key": key} - self.client.copy( - copy_source, destination_bucket, dest_key, ExtraArgs=kwargs - ) + copy_source = {'Bucket': origin_bucket, 'Key': key} + self.client.copy(copy_source, destination_bucket, dest_key, ExtraArgs=kwargs) if remove_original: try: self.remove_file(origin_bucket, origin_key) except Exception as e: - logger.error("Failed to delete original key: " + str(e)) + logger.error('Failed to delete original key: ' + str(e)) if public_read: object_acl = self.s3.ObjectAcl(destination_bucket, destination_key) - object_acl.put(ACL="public-read") + object_acl.put(ACL='public-read') - logger.info(f"Finished syncing {len(key_list)} keys") + logger.info(f'Finished syncing {len(key_list)} keys') diff --git a/parsons/azure/__init__.py b/parsons/azure/__init__.py index 8e8455f3d6..84066fdbe9 100644 --- a/parsons/azure/__init__.py +++ b/parsons/azure/__init__.py @@ -1,3 +1,5 @@ from parsons.azure.azure_blob_storage import AzureBlobStorage -__all__ = ["AzureBlobStorage"] +__all__ = [ + 'AzureBlobStorage' +] diff --git a/parsons/azure/azure_blob_storage.py b/parsons/azure/azure_blob_storage.py index 9d824ce021..a7305b452e 100644 --- a/parsons/azure/azure_blob_storage.py +++ b/parsons/azure/azure_blob_storage.py @@ -33,31 +33,22 @@ class AzureBlobStorage(object): `AzureBlobStorage` """ - def __init__( - self, - account_name=None, - credential=None, - account_domain="blob.core.windows.net", - account_url=None, - ): - self.account_url = os.getenv("AZURE_ACCOUNT_URL", account_url) - self.credential = 
check_env.check("AZURE_CREDENTIAL", credential) + def __init__(self, account_name=None, credential=None, account_domain='blob.core.windows.net', + account_url=None): + self.account_url = os.getenv('AZURE_ACCOUNT_URL', account_url) + self.credential = check_env.check('AZURE_CREDENTIAL', credential) if not self.account_url: - self.account_name = check_env.check("AZURE_ACCOUNT_NAME", account_name) - self.account_domain = check_env.check( - "AZURE_ACCOUNT_DOMAIN", account_domain - ) - self.account_url = f"https://{self.account_name}.{self.account_domain}/" + self.account_name = check_env.check('AZURE_ACCOUNT_NAME', account_name) + self.account_domain = check_env.check('AZURE_ACCOUNT_DOMAIN', account_domain) + self.account_url = f'https://{self.account_name}.{self.account_domain}/' else: - if not self.account_url.startswith("http"): - self.account_url = f"https://{self.account_url}" + if not self.account_url.startswith('http'): + self.account_url = f'https://{self.account_url}' # Update the account name and domain if a URL is supplied parsed_url = urlparse(self.account_url) self.account_name = parsed_url.netloc.split(".")[0] self.account_domain = ".".join(parsed_url.netloc.split(".")[1:]) - self.client = BlobServiceClient( - account_url=self.account_url, credential=self.credential - ) + self.client = BlobServiceClient(account_url=self.account_url, credential=self.credential) def list_containers(self): """ @@ -68,10 +59,8 @@ def list_containers(self): List of container names """ - container_names = [ - container.name for container in self.client.list_containers() - ] - logger.info(f"Found {len(container_names)} containers.") + container_names = [container.name for container in self.client.list_containers()] + logger.info(f'Found {len(container_names)} containers.') return container_names def container_exists(self, container_name): @@ -88,10 +77,10 @@ def container_exists(self, container_name): container_client = self.get_container(container_name) try: container_client.get_container_properties() - logger.info(f"{container_name} exists.") + logger.info(f'{container_name} exists.') return True except ResourceNotFoundError: - logger.info(f"{container_name} does not exist.") + logger.info(f'{container_name} does not exist.') return False def get_container(self, container_name): @@ -105,12 +94,10 @@ def get_container(self, container_name): `ContainerClient` """ - logger.info(f"Returning {container_name} container client") + logger.info(f'Returning {container_name} container client') return self.client.get_container_client(container_name) - def create_container( - self, container_name, metadata=None, public_access=None, **kwargs - ): + def create_container(self, container_name, metadata=None, public_access=None, **kwargs): """ Create a container @@ -133,7 +120,7 @@ def create_container( container_client = self.client.create_container( container_name, metadata=metadata, public_access=public_access, **kwargs ) - logger.info(f"Created {container_name} container.") + logger.info(f'Created {container_name} container.') return container_client def delete_container(self, container_name): @@ -148,7 +135,7 @@ def delete_container(self, container_name): """ self.client.delete_container(container_name) - logger.info(f"{container_name} container deleted.") + logger.info(f'{container_name} container deleted.') def list_blobs(self, container_name, name_starts_with=None): """ @@ -166,10 +153,9 @@ def list_blobs(self, container_name, name_starts_with=None): container_client = self.get_container(container_name) blobs = 
[ - blob - for blob in container_client.list_blobs(name_starts_with=name_starts_with) + blob for blob in container_client.list_blobs(name_starts_with=name_starts_with) ] - logger.info(f"Found {len(blobs)} blobs in {container_name} container.") + logger.info(f'Found {len(blobs)} blobs in {container_name} container.') return blobs def blob_exists(self, container_name, blob_name): @@ -188,10 +174,10 @@ def blob_exists(self, container_name, blob_name): blob_client = self.get_blob(container_name, blob_name) try: blob_client.get_blob_properties() - logger.info(f"{blob_name} exists in {container_name} container.") + logger.info(f'{blob_name} exists in {container_name} container.') return True except ResourceNotFoundError: - logger.info(f"{blob_name} does not exist in {container_name} container.") + logger.info(f'{blob_name} does not exist in {container_name} container.') return False def get_blob(self, container_name, blob_name): @@ -208,18 +194,11 @@ def get_blob(self, container_name, blob_name): """ blob_client = self.client.get_blob_client(container_name, blob_name) - logger.info(f"Got {blob_name} blob from {container_name} container.") + logger.info(f'Got {blob_name} blob from {container_name} container.') return blob_client - def get_blob_url( - self, - container_name, - blob_name, - account_key=None, - permission=None, - expiry=None, - start=None, - ): + def get_blob_url(self, container_name, blob_name, account_key=None, permission=None, + expiry=None, start=None): """ Get a URL with a shared access signature for a blob @@ -248,7 +227,7 @@ def get_blob_url( if not account_key: if not self.credential: raise ValueError( - "An account shared access key must be provided if it was not on initialization" + 'An account shared access key must be provided if it was not on initialization' ) account_key = self.credential @@ -261,7 +240,7 @@ def get_blob_url( expiry=expiry, start=start, ) - return f"{self.account_url}/{container_name}/{blob_name}?sas={sas}" + return f'{self.account_url}/{container_name}/{blob_name}?sas={sas}' def _get_content_settings_from_dict(self, kwargs_dict): """ @@ -280,12 +259,8 @@ def _get_content_settings_from_dict(self, kwargs_dict): content_settings = None content_settings_dict = {} content_settings_keys = [ - "content_type", - "content_encoding", - "content_language", - "content_disposition", - "cache_control", - "content_md5", + 'content_type', 'content_encoding', 'content_language', 'content_disposition', + 'cache_control', 'content_md5' ] kwarg_keys = list(kwargs_copy.keys()) for key in kwarg_keys: @@ -321,7 +296,7 @@ def put_blob(self, container_name, blob_name, local_path, **kwargs): # Move all content_settings keys into a ContentSettings object content_settings, kwargs_dict = self._get_content_settings_from_dict(kwargs) - with open(local_path, "rb") as f: + with open(local_path, 'rb') as f: data = f.read() blob_client = blob_client.upload_blob( @@ -330,7 +305,7 @@ def put_blob(self, container_name, blob_name, local_path, **kwargs): content_settings=content_settings, **kwargs_dict, ) - logger.info(f"{blob_name} blob put in {container_name} container") + logger.info(f'{blob_name} blob put in {container_name} container') # Return refreshed BlobClient object return self.get_blob(container_name, blob_name) @@ -354,14 +329,14 @@ def download_blob(self, container_name, blob_name, local_path=None): """ if not local_path: - local_path = files.create_temp_file_for_path("TEMPFILEAZURE") + local_path = files.create_temp_file_for_path('TEMPFILEAZURE') blob_client = 
self.get_blob(container_name, blob_name) - logger.info(f"Downloading {blob_name} blob from {container_name} container.") - with open(local_path, "wb") as f: + logger.info(f'Downloading {blob_name} blob from {container_name} container.') + with open(local_path, 'wb') as f: blob_client.download_blob().readinto(f) - logger.info(f"{blob_name} blob saved to {local_path}.") + logger.info(f'{blob_name} blob saved to {local_path}.') return local_path @@ -380,9 +355,9 @@ def delete_blob(self, container_name, blob_name): blob_client = self.get_blob(container_name, blob_name) blob_client.delete_blob() - logger.info(f"{blob_name} blob in {container_name} container deleted.") + logger.info(f'{blob_name} blob in {container_name} container deleted.') - def upload_table(self, table, container_name, blob_name, data_type="csv", **kwargs): + def upload_table(self, table, container_name, blob_name, data_type='csv', **kwargs): """ Load the data from a Parsons table into a blob. @@ -401,16 +376,14 @@ def upload_table(self, table, container_name, blob_name, data_type="csv", **kwar `BlobClient` """ - if data_type == "csv": + if data_type == 'csv': local_path = table.to_csv() - content_type = "text/csv" - elif data_type == "json": + content_type = 'text/csv' + elif data_type == 'json': local_path = table.to_json() - content_type = "application/json" + content_type = 'application/json' else: - raise ValueError( - f"Unknown data_type value ({data_type}): must be one of: csv or json" - ) + raise ValueError(f'Unknown data_type value ({data_type}): must be one of: csv or json') return self.put_blob( container_name, blob_name, local_path, content_type=content_type, **kwargs diff --git a/parsons/bill_com/__init__.py b/parsons/bill_com/__init__.py index cff2e018ae..97b497841a 100644 --- a/parsons/bill_com/__init__.py +++ b/parsons/bill_com/__init__.py @@ -1,3 +1,5 @@ from parsons.bill_com.bill_com import BillCom -__all__ = ["BillCom"] +__all__ = [ + 'BillCom' +] diff --git a/parsons/bill_com/bill_com.py b/parsons/bill_com/bill_com.py index 83471ec2b7..ad024b65e7 100644 --- a/parsons/bill_com/bill_com.py +++ b/parsons/bill_com/bill_com.py @@ -19,19 +19,21 @@ class BillCom(object): """ def __init__(self, user_name, password, org_id, dev_key, api_url): - self.headers = {"Content-Type": "application/x-www-form-urlencoded"} + self.headers = { + "Content-Type": "application/x-www-form-urlencoded" + } params = { "userName": user_name, "password": password, "orgId": org_id, - "devKey": dev_key, + "devKey": dev_key } - response = requests.post( - url="%sLogin.json" % api_url, data=params, headers=self.headers - ) + response = requests.post(url="%sLogin.json" % api_url, + data=params, + headers=self.headers) self.dev_key = dev_key self.api_url = api_url - self.session_id = response.json()["response_data"]["sessionId"] + self.session_id = response.json()['response_data']['sessionId'] def _get_payload(self, data): """ @@ -45,11 +47,9 @@ def _get_payload(self, data): A dictionary of the payload to be sent in the request with the dev_key and sessionId added. """ - return { - "devKey": self.dev_key, - "sessionId": self.session_id, - "data": json.dumps(data), - } + return {"devKey": self.dev_key, + "sessionId": self.session_id, + "data": json.dumps(data)} def _post_request(self, data, action, object_name): """ @@ -68,10 +68,10 @@ def _post_request(self, data, action, object_name): A dictionary containing the JSON response from the post request. 
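# A sketch of loading a Parsons Table into blob storage with the
# AzureBlobStorage.upload_table method above; the account is assumed to be
# configured via the AZURE_* environment variables, and the container and
# blob names are placeholders.
from parsons import Table
from parsons.azure import AzureBlobStorage

azure = AzureBlobStorage()
tbl = Table([{"id": 1, "name": "Ada"}])
azure.upload_table(tbl, "my-container", "people.json", data_type="json")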
""" - if action == "Read": + if action == 'Read': url = "%sCrud/%s/%s.json" % (self.api_url, action, object_name) - elif action == "Create": - data["obj"]["entity"] = object_name + elif action == 'Create': + data['obj']['entity'] = object_name url = "%sCrud/%s/%s.json" % (self.api_url, action, object_name) elif action == "Send": url = "%s%s%s.json" % (self.api_url, action, object_name) @@ -119,10 +119,10 @@ def _paginate_list(self, response, data, object_name, field="response_data"): """ r_table = Table(response) - max_ct = data["max"] + max_ct = data['max'] while len(response) == max_ct: - data["start"] += max_ct + data['start'] += max_ct response = self._post_request(data, "List", object_name)[field] r_table.concat(Table(response)) @@ -141,7 +141,11 @@ def get_user_list(self, start_user=0, max_user=999, **kwargs): `Returns:` A Parsons Table of user information for every user from start_user to max_user. """ - data = {"start": start_user, "max": max_user, **kwargs} + data = { + "start": start_user, + "max": max_user, + **kwargs + } return self._get_request_response(data, "List", "User") @@ -159,7 +163,11 @@ def get_customer_list(self, start_customer=0, max_customer=999, **kwargs): A Parsons Table of customer information for every user from start_customer to max_customer. """ - data = {"start": start_customer, "max": max_customer, **kwargs} + data = { + "start": start_customer, + "max": max_customer, + **kwargs + } return self._get_request_response(data, "List", "Customer") @@ -177,7 +185,11 @@ def get_invoice_list(self, start_invoice=0, max_invoice=999, **kwargs): A list of dictionaries of invoice information for every invoice from start_invoice to max_invoice. """ - data = {"start": start_invoice, "max": max_invoice, **kwargs} + data = { + "start": start_invoice, + "max": max_invoice, + **kwargs + } return self._get_request_response(data, "List", "Invoice") @@ -190,7 +202,9 @@ def read_customer(self, customer_id): `Returns:` A dictionary of the customer's information. """ - data = {"id": customer_id} + data = { + 'id': customer_id + } return self._get_request_response(data, "Read", "Customer") def read_invoice(self, invoice_id): @@ -202,7 +216,9 @@ def read_invoice(self, invoice_id): `Returns:` A dictionary of the invoice information. """ - data = {"id": invoice_id} + data = { + "id": invoice_id + } return self._get_request_response(data, "Read", "Invoice") def check_customer(self, customer1, customer2): @@ -224,8 +240,8 @@ def check_customer(self, customer1, customer2): if "id" in customer1.keys(): if customer1["id"] == customer2["id"]: return True - if "id" not in customer1.keys() and customer2["email"]: - if customer1["email"].lower() == customer2["email"].lower(): + if "id" not in customer1.keys() and customer2['email']: + if customer1['email'].lower() == customer2['email'].lower(): return True return False @@ -245,7 +261,9 @@ def get_or_create_customer(self, customer_name, customer_email, **kwargs): If the customer already exists, this function will not create a new id and instead use the existing id. 
""" - customer = {"name": customer_name, "email": customer_email, **kwargs} + customer = {"name": customer_name, + "email": customer_email, + **kwargs} # check if customer already exists customer_list = self.get_customer_list() @@ -253,18 +271,13 @@ def get_or_create_customer(self, customer_name, customer_email, **kwargs): if self.check_customer(customer, existing_customer): return existing_customer # customer doesn't exist, create - data = {"obj": customer} + data = { + "obj": customer + } return self._get_request_response(data, "Create", "Customer") - def create_invoice( - self, - customer_id, - invoice_number, - invoice_date, - due_date, - invoice_line_items, - **kwargs - ): + def create_invoice(self, customer_id, invoice_number, invoice_date, + due_date, invoice_line_items, **kwargs): """ `Args:` customer_id: str @@ -288,28 +301,20 @@ def create_invoice( """ for invoice_line_item in invoice_line_items: if "entity" not in invoice_line_item: - invoice_line_item["entity"] = "InvoiceLineItem" + invoice_line_item['entity'] = 'InvoiceLineItem' data = { - "obj": { - "customerId": customer_id, - "invoiceNumber": invoice_number, - "invoiceDate": invoice_date, - "dueDate": due_date, - "invoiceLineItems": invoice_line_items, - **kwargs, - } + "obj": {"customerId": customer_id, + "invoiceNumber": invoice_number, + "invoiceDate": invoice_date, + "dueDate": due_date, + "invoiceLineItems": invoice_line_items, + **kwargs + } } return self._get_request_response(data, "Create", "Invoice") - def send_invoice( - self, - invoice_id, - from_user_id, - to_email_addresses, - message_subject, - message_body, - **kwargs - ): + def send_invoice(self, invoice_id, from_user_id, to_email_addresses, + message_subject, message_body, **kwargs): """ `Args:` invoice_id: str @@ -329,13 +334,15 @@ def send_invoice( A dictionary of the sent invoice. 
""" data = { - "invoiceId": invoice_id, - "headers": { - "fromUserId": from_user_id, - "toEmailAddresses": to_email_addresses, - "subject": message_subject, - **kwargs, - }, - "content": {"body": message_body}, + "invoiceId": invoice_id, + "headers": { + "fromUserId": from_user_id, + "toEmailAddresses": to_email_addresses, + "subject": message_subject, + **kwargs + }, + "content": { + "body": message_body + } } return self._get_request_response(data, "Send", "Invoice") diff --git a/parsons/bloomerang/__init__.py b/parsons/bloomerang/__init__.py index 1dfd6f79ab..14cfdd29dc 100644 --- a/parsons/bloomerang/__init__.py +++ b/parsons/bloomerang/__init__.py @@ -1,3 +1,5 @@ from parsons.bloomerang.bloomerang import Bloomerang -__all__ = ["Bloomerang"] +__all__ = [ + 'Bloomerang' +] diff --git a/parsons/bloomerang/bloomerang.py b/parsons/bloomerang/bloomerang.py index eb6cd858e5..0da712482f 100644 --- a/parsons/bloomerang/bloomerang.py +++ b/parsons/bloomerang/bloomerang.py @@ -34,56 +34,53 @@ class Bloomerang(object): """ def __init__(self, api_key=None, client_id=None, client_secret=None): - self.api_key = check_env.check("BLOOMERANG_API_KEY", api_key, optional=True) - self.client_id = check_env.check( - "BLOOMERANG_CLIENT_ID", client_id, optional=True - ) - self.client_secret = check_env.check( - "BLOOMERANG_CLIENT_SECRET", client_secret, optional=True - ) + self.api_key = check_env.check('BLOOMERANG_API_KEY', api_key, optional=True) + self.client_id = check_env.check('BLOOMERANG_CLIENT_ID', client_id, optional=True) + self.client_secret = check_env.check('BLOOMERANG_CLIENT_SECRET', client_secret, + optional=True) self.uri = URI self.uri_auth = URI_AUTH self.conn = self._conn() def _conn(self): # Instantiate APIConnector with authentication credentials - headers = {"accept": "application/json", "Content-Type": "application/json"} + headers = {"accept": "application/json", + "Content-Type": "application/json"} if self.api_key is not None: logger.info("Using API key authentication.") - headers["X-API-KEY"] = f"{self.api_key}" + headers['X-API-KEY'] = f"{self.api_key}" elif (self.client_id is not None) & (self.client_secret is not None): - logger.info("Using OAuth2 authentication.") + logger.info('Using OAuth2 authentication.') self._generate_authorization_code() self._generate_access_token() - headers["Authorization"] = f"Bearer {self.access_token}" + headers['Authorization'] = f"Bearer {self.access_token}" else: - raise Exception("Missing authorization credentials.") + raise Exception('Missing authorization credentials.') return APIConnector(uri=self.uri, headers=headers) def _generate_authorization_code(self): - data = {"client_id": self.client_id, "response_type": "code"} + data = {'client_id': self.client_id, + 'response_type': 'code'} r = requests.post(url=self.uri_auth, json=data) - self.authorization_code = r.json().get("code", None) + self.authorization_code = r.json().get('code', None) def _generate_access_token(self): - data = { - "client_id": self.client_id, - "client_secret": self.client_secret, - "grant_type": "authorization_code", - "code": self.authorization_code, - } - r = requests.post(url=self.uri + "oauth/token", json=data) - self.access_token = r.json().get("access_token", None) + data = {'client_id': self.client_id, + 'client_secret': self.client_secret, + 'grant_type': 'authorization_code', + 'code': self.authorization_code} + r = requests.post(url=self.uri + 'oauth/token', json=data) + self.access_token = r.json().get('access_token', None) def _base_endpoint(self, 
endpoint, entity_id=None): - url = f"{self.uri}{endpoint}/" + url = f'{self.uri}{endpoint}/' if entity_id: - url = url + f"{entity_id}/" + url = url + f'{entity_id}/' return url @staticmethod def _base_pagination_params(page_number=1, page_size=50): - return {"skip": page_size * (page_number - 1), "take": min(page_size, 50)} + return {'skip': page_size * (page_number - 1), 'take': min(page_size, 50)} @staticmethod def _base_ordering_params(order_by=None, order_direction=None): @@ -98,19 +95,15 @@ def _base_ordering_params(order_by=None, order_direction=None): return params def _base_create(self, endpoint, entity_id=None, **kwargs): - return self.conn.post_request( - url=self._base_endpoint(endpoint, entity_id), json=json.dumps({**kwargs}) - ) + return self.conn.post_request(url=self._base_endpoint(endpoint, entity_id), + json=json.dumps({**kwargs})) def _base_update(self, endpoint, entity_id=None, **kwargs): - return self.conn.put_request( - url=self._base_endpoint(endpoint, entity_id), json=json.dumps({**kwargs}) - ) + return self.conn.put_request(url=self._base_endpoint(endpoint, entity_id), + json=json.dumps({**kwargs})) def _base_get(self, endpoint, entity_id=None, params=None): - return self.conn.get_request( - url=self._base_endpoint(endpoint, entity_id), params=params - ) + return self.conn.get_request(url=self._base_endpoint(endpoint, entity_id), params=params) def _base_delete(self, endpoint, entity_id=None): return self.conn.delete_request(url=self._base_endpoint(endpoint, entity_id)) @@ -123,7 +116,7 @@ def create_constituent(self, **kwargs): See the Bloomerang API docs for a full list of `fields `_. # noqa """ - return self._base_create("constituent", **kwargs) + return self._base_create('constituent', **kwargs) def update_constituent(self, constituent_id, **kwargs): """ @@ -135,7 +128,7 @@ def update_constituent(self, constituent_id, **kwargs): See the Bloomerang API docs for a full list of `fields `_. # noqa """ - return self._base_update("constituent", entity_id=constituent_id, **kwargs) + return self._base_update('constituent', entity_id=constituent_id, **kwargs) def get_constituent(self, constituent_id): """ @@ -145,7 +138,7 @@ def get_constituent(self, constituent_id): `Returns:` A JSON of the entry or an error. """ - return self._base_get("constituent", entity_id=constituent_id) + return self._base_get('constituent', entity_id=constituent_id) def delete_constituent(self, constituent_id): """ @@ -153,16 +146,10 @@ def delete_constituent(self, constituent_id): constituent_id: str or int Constituent ID to delete """ - return self._base_delete("constituent", entity_id=constituent_id) + return self._base_delete('constituent', entity_id=constituent_id) - def get_constituents( - self, - page_number=1, - page_size=50, - order_by=None, - order_direction=None, - last_modified=None, - ): + def get_constituents(self, page_number=1, page_size=50, order_by=None, order_direction=None, + last_modified=None): """ `Args:` page_number: int @@ -184,8 +171,8 @@ def get_constituents( if last_modified: params["lastModified"] = last_modified - response = self._base_get("constituents", params=params) - return Table(response["Results"]) + response = self._base_get('constituents', params=params) + return Table(response['Results']) def create_transaction(self, **kwargs): """ @@ -195,7 +182,7 @@ def create_transaction(self, **kwargs): See the Bloomerang API docs for a full list of `fields `_. 
# noqa """ - return self._base_create("transaction", **kwargs) + return self._base_create('transaction', **kwargs) def update_transaction(self, transaction_id, **kwargs): """ @@ -207,7 +194,7 @@ def update_transaction(self, transaction_id, **kwargs): See the Bloomerang API docs for a full list of `fields `_. # noqa """ - return self._base_update("transaction", entity_id=transaction_id, **kwargs) + return self._base_update('transaction', entity_id=transaction_id, **kwargs) def get_transaction(self, transaction_id): """ @@ -217,7 +204,7 @@ def get_transaction(self, transaction_id): `Returns:` A JSON of the entry or an error. """ - return self._base_get("transaction", entity_id=transaction_id) + return self._base_get('transaction', entity_id=transaction_id) def delete_transaction(self, transaction_id): """ @@ -225,11 +212,9 @@ def delete_transaction(self, transaction_id): transaction_id: str or int Transaction ID to delete """ - return self._base_delete("transaction", entity_id=transaction_id) + return self._base_delete('transaction', entity_id=transaction_id) - def get_transactions( - self, page_number=1, page_size=50, order_by=None, order_direction=None - ): + def get_transactions(self, page_number=1, page_size=50, order_by=None, order_direction=None): """ `Args:` page_number: int @@ -246,8 +231,8 @@ def get_transactions( params = self._base_pagination_params(page_number, page_size) params.update(self._base_ordering_params(order_by, order_direction)) - response = self._base_get("transactions", params=params) - return Table(response["Results"]) + response = self._base_get('transactions', params=params) + return Table(response['Results']) def get_transaction_designation(self, designation_id): """ @@ -257,11 +242,10 @@ def get_transaction_designation(self, designation_id): `Returns:` A JSON of the entry or an error. """ - return self._base_get("transaction/designation", entity_id=designation_id) + return self._base_get('transaction/designation', entity_id=designation_id) - def get_transaction_designations( - self, page_number=1, page_size=50, order_by=None, order_direction=None - ): + def get_transaction_designations(self, page_number=1, page_size=50, order_by=None, + order_direction=None): """ `Args:` page_number: int @@ -278,8 +262,8 @@ def get_transaction_designations( params = self._base_pagination_params(page_number, page_size) params.update(self._base_ordering_params(order_by, order_direction)) - response = self._base_get("transactions/designations", params=params) - return Table(response["Results"]) + response = self._base_get('transactions/designations', params=params) + return Table(response['Results']) def create_interaction(self, **kwargs): """ @@ -289,7 +273,7 @@ def create_interaction(self, **kwargs): See the Bloomerang API docs for a full list of `fields `_. # noqa """ - return self._base_create("interaction", **kwargs) + return self._base_create('interaction', **kwargs) def update_interaction(self, interaction_id, **kwargs): """ @@ -301,7 +285,7 @@ def update_interaction(self, interaction_id, **kwargs): See the Bloomerang API docs for a full list of `fields `_. # noqa """ - return self._base_update("interaction", entity_id=interaction_id, **kwargs) + return self._base_update('interaction', entity_id=interaction_id, **kwargs) def get_interaction(self, interaction_id): """ @@ -311,7 +295,7 @@ def get_interaction(self, interaction_id): `Returns:` A JSON of the entry or an error. 
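# A minimal sketch of the paginated reads above, assuming a valid
# BLOOMERANG_API_KEY in the environment; the ordering field and direction
# are illustrative.
from parsons.bloomerang import Bloomerang

bloomerang = Bloomerang()
transactions = bloomerang.get_transactions(page_number=1, page_size=50,
                                           order_by="Date",
                                           order_direction="Desc")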
""" - return self._base_get("interaction", entity_id=interaction_id) + return self._base_get('interaction', entity_id=interaction_id) def delete_interaction(self, interaction_id): """ @@ -319,7 +303,7 @@ def delete_interaction(self, interaction_id): interaction_id: str or int Interaction ID to delete """ - return self._base_delete("interaction", entity_id=interaction_id) + return self._base_delete('interaction', entity_id=interaction_id) def get_interactions(self, page_number=1, page_size=50): """ @@ -332,5 +316,5 @@ def get_interactions(self, page_number=1, page_size=50): A JSON of the entry or an error. """ params = self._base_pagination_params(page_number, page_size) - response = self._base_get("interactions", params=params) - return Table(response["Results"]) + response = self._base_get('interactions', params=params) + return Table(response['Results']) diff --git a/parsons/bluelink/__init__.py b/parsons/bluelink/__init__.py index d3c77bf6f1..754e64bdfc 100644 --- a/parsons/bluelink/__init__.py +++ b/parsons/bluelink/__init__.py @@ -1,21 +1,9 @@ from parsons.bluelink.bluelink import Bluelink -from parsons.bluelink.person import ( - BluelinkPerson, - BluelinkEmail, - BluelinkAddress, - BluelinkPhone, - BluelinkIdentifier, - BluelinkTag, - BluelinkScore, -) +from parsons.bluelink.person import BluelinkPerson, \ + BluelinkEmail, BluelinkAddress, BluelinkPhone, BluelinkIdentifier, BluelinkTag, BluelinkScore __all__ = [ - "Bluelink", - "BluelinkPerson", - "BluelinkEmail", - "BluelinkAddress", - "BluelinkPhone", - "BluelinkIdentifier", - "BluelinkTag", - "BluelinkScore", + 'Bluelink', + 'BluelinkPerson', 'BluelinkEmail', 'BluelinkAddress', 'BluelinkPhone', + 'BluelinkIdentifier', 'BluelinkTag', 'BluelinkScore' ] diff --git a/parsons/bluelink/bluelink.py b/parsons/bluelink/bluelink.py index d132d00c9c..eabb791e69 100644 --- a/parsons/bluelink/bluelink.py +++ b/parsons/bluelink/bluelink.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -API_URL = "https://api.bluelink.org/webhooks/" +API_URL = 'https://api.bluelink.org/webhooks/' class Bluelink: @@ -21,17 +21,14 @@ class Bluelink: password: str Bluelink webhook password. """ - def __init__(self, user=None, password=None): - self.user = check_env.check("BLUELINK_WEBHOOK_USER", user) - self.password = check_env.check("BLUELINK_WEBHOOK_PASSWORD", password) + self.user = check_env.check('BLUELINK_WEBHOOK_USER', user) + self.password = check_env.check('BLUELINK_WEBHOOK_PASSWORD', password) self.headers = { "Content-Type": "application/json", } self.api_url = API_URL - self.api = APIConnector( - self.api_url, auth=(self.user, self.password), headers=self.headers - ) + self.api = APIConnector(self.api_url, auth=(self.user, self.password), headers=self.headers) def upsert_person(self, source, person=None): """ @@ -51,11 +48,12 @@ def upsert_person(self, source, person=None): int An http status code from the http post request to the Bluelink webhook. 
""" - data = {"source": source, "person": person} - jdata = json.dumps( - data, - default=lambda o: {k: v for k, v in o.__dict__.items() if v is not None}, - ) + data = { + 'source': source, + 'person': person + } + jdata = json.dumps(data, + default=lambda o: {k: v for k, v in o.__dict__.items() if v is not None}) resp = self.api.post_request(url=self.api_url, data=jdata) return resp diff --git a/parsons/bluelink/person.py b/parsons/bluelink/person.py index f808c090f1..261104d764 100644 --- a/parsons/bluelink/person.py +++ b/parsons/bluelink/person.py @@ -39,29 +39,13 @@ class BluelinkPerson(object): details: dict additional custom data. must be json serializable. """ - - def __init__( - self, - identifiers, - given_name=None, - family_name=None, - phones=None, - emails=None, - addresses=None, - tags=None, - employer=None, - employer_address=None, - occupation=None, - scores=None, - birthdate=None, - details=None, - ): + def __init__(self, identifiers, given_name=None, family_name=None, phones=None, emails=None, + addresses=None, tags=None, employer=None, employer_address=None, + occupation=None, scores=None, birthdate=None, details=None): if not identifiers: - raise Exception( - "BluelinkPerson requires list of BluelinkIdentifiers with " - "at least 1 BluelinkIdentifier" - ) + raise Exception("BluelinkPerson requires list of BluelinkIdentifiers with " + "at least 1 BluelinkIdentifier") self.identifiers = identifiers self.addresses = addresses @@ -127,7 +111,6 @@ class BluelinkIdentifier(object): details: dict dictionary of custom fields. must be serializable to json. """ - def __init__(self, source, identifier, details=None): self.source = source self.identifier = identifier @@ -148,7 +131,6 @@ class BluelinkEmail(object): status: str One of "Potential", "Subscribed", "Unsubscribed", "Bouncing", or "Spam Complaints" """ - def __init__(self, address, primary=None, type=None, status=None): self.address = address self.primary = primary @@ -178,18 +160,10 @@ class BluelinkAddress(object): status: str A value representing the status of the address. "Potential", "Verified" or "Bad" """ - - def __init__( - self, - address_lines=None, - city=None, - state=None, - postal_code=None, - country=None, - type=None, - venue=None, - status=None, - ): + def __init__(self, + address_lines=None, + city=None, state=None, postal_code=None, country=None, + type=None, venue=None, status=None): self.address_lines = address_lines or [] self.city = city @@ -224,18 +198,8 @@ class BluelinkPhone(object): details: dict Additional data dictionary. Must be json serializable. """ - - def __init__( - self, - number, - primary=None, - description=None, - type=None, - country=None, - sms_capable=None, - do_not_call=None, - details=None, - ): + def __init__(self, number, primary=None, description=None, type=None, country=None, + sms_capable=None, do_not_call=None, details=None): self.number = number self.primary = primary self.description = description @@ -255,7 +219,6 @@ class BluelinkTag(object): A tag string; convention is either a simple string or a string with a prefix separated by a colon, e.g., “DONOR:GRASSROOTS” """ - def __init__(self, tag): self.tag = tag @@ -273,7 +236,6 @@ class BluelinkScore(object): source: str Original source of this score. 
""" - def __init__(self, score, score_type, source): self.score = score self.score_type = score_type diff --git a/parsons/box/__init__.py b/parsons/box/__init__.py index 64181e16f6..09028d7bff 100644 --- a/parsons/box/__init__.py +++ b/parsons/box/__init__.py @@ -1,3 +1,5 @@ from parsons.box.box import Box -__all__ = ["Box"] +__all__ = [ + 'Box' +] diff --git a/parsons/box/box.py b/parsons/box/box.py index 56b1aab0f6..fe2810f528 100644 --- a/parsons/box/box.py +++ b/parsons/box/box.py @@ -29,7 +29,7 @@ logger = logging.getLogger(__name__) -DEFAULT_FOLDER_ID = "0" +DEFAULT_FOLDER_ID = '0' class Box(object): @@ -56,17 +56,18 @@ class Box(object): that contain many items. If performance is an issue, please use the corresponding folder_id/file_id methods for each function. """ - # In what formats can we upload/save Tables to Box? For now csv and JSON. - ALLOWED_FILE_FORMATS = ["csv", "json"] + ALLOWED_FILE_FORMATS = ['csv', 'json'] def __init__(self, client_id=None, client_secret=None, access_token=None): - client_id = check_env("BOX_CLIENT_ID", client_id) - client_secret = check_env("BOX_CLIENT_SECRET", client_secret) - access_token = check_env("BOX_ACCESS_TOKEN", access_token) + client_id = check_env('BOX_CLIENT_ID', client_id) + client_secret = check_env('BOX_CLIENT_SECRET', client_secret) + access_token = check_env('BOX_ACCESS_TOKEN', access_token) oauth = boxsdk.OAuth2( - client_id=client_id, client_secret=client_secret, access_token=access_token + client_id=client_id, + client_secret=client_secret, + access_token=access_token ) self.client = boxsdk.Client(oauth) @@ -81,17 +82,16 @@ def create_folder(self, path) -> str: `Returns`: str: The Box id of the newly-created folder. """ - if "/" in path: - parent_folder_path, folder_name = path.rsplit(sep="/", maxsplit=1) + if '/' in path: + parent_folder_path, folder_name = path.rsplit(sep='/', maxsplit=1) parent_folder_id = self.get_item_id(path=parent_folder_path) else: folder_name = path parent_folder_id = DEFAULT_FOLDER_ID return self.create_folder_by_id(folder_name, parent_folder_id=parent_folder_id) - def create_folder_by_id( - self, folder_name, parent_folder_id=DEFAULT_FOLDER_ID - ) -> str: + def create_folder_by_id(self, folder_name, + parent_folder_id=DEFAULT_FOLDER_ID) -> str: """Create a Box folder. `Args`: @@ -145,7 +145,7 @@ def delete_file_by_id(self, file_id) -> None: """ self.client.file(file_id=file_id).delete() - def list(self, path="", item_type=None) -> Table: + def list(self, path='', item_type=None) -> Table: """Return a Table of Box files and/or folders found at a path. `Args`: @@ -166,10 +166,10 @@ def list(self, path="", item_type=None) -> Table: return self.list_items_by_id(folder_id=folder_id, item_type=item_type) def list_items_by_id(self, folder_id=DEFAULT_FOLDER_ID, item_type=None) -> Table: - url = "https://api.box.com/2.0/folders/" + folder_id - json_response = self.client.make_request("GET", url) + url = 'https://api.box.com/2.0/folders/' + folder_id + json_response = self.client.make_request('GET', url) - items = Table(json_response.json()["item_collection"]["entries"]) + items = Table(json_response.json()['item_collection']['entries']) if item_type: items = items.select_rows(lambda row: row.type == item_type) return items @@ -184,7 +184,7 @@ def list_files_by_id(self, folder_id=DEFAULT_FOLDER_ID) -> Table: `Returns`: Table A Parsons table of files and their attributes. 
""" - return self.list_items_by_id(folder_id=folder_id, item_type="file") + return self.list_items_by_id(folder_id=folder_id, item_type='file') def list_folders_by_id(self, folder_id=DEFAULT_FOLDER_ID) -> Table: """List all Box folders. @@ -196,9 +196,9 @@ def list_folders_by_id(self, folder_id=DEFAULT_FOLDER_ID) -> Table: `Returns`: Table A Parsons table of folders and their attributes. """ - return self.list_items_by_id(folder_id=folder_id, item_type="folder") + return self.list_items_by_id(folder_id=folder_id, item_type='folder') - def upload_table(self, table, path="", format="csv") -> boxsdk.object.file.File: + def upload_table(self, table, path='', format='csv') -> boxsdk.object.file.File: """Save the passed table to Box. `Args`: @@ -212,20 +212,18 @@ def upload_table(self, table, path="", format="csv") -> boxsdk.object.file.File: `Returns`: BoxFile A Box File object """ - if "/" in path: - folder_path, file_name = path.rsplit(sep="/", maxsplit=1) + if '/' in path: + folder_path, file_name = path.rsplit(sep='/', maxsplit=1) folder_id = self.get_item_id(path=folder_path) else: # pragma: no cover file_name = path folder_id = DEFAULT_FOLDER_ID - return self.upload_table_to_folder_id( - table=table, file_name=file_name, folder_id=folder_id, format=format - ) + return self.upload_table_to_folder_id(table=table, file_name=file_name, + folder_id=folder_id, format=format) - def upload_table_to_folder_id( - self, table, file_name, folder_id=DEFAULT_FOLDER_ID, format="csv" - ) -> boxsdk.object.file.File: + def upload_table_to_folder_id(self, table, file_name, folder_id=DEFAULT_FOLDER_ID, + format='csv') -> boxsdk.object.file.File: """Save the passed table to Box. `Args`: @@ -243,27 +241,23 @@ def upload_table_to_folder_id( """ if format not in self.ALLOWED_FILE_FORMATS: - raise ValueError( - f"Format argument to upload_table() must be in one " - f'of {self.ALLOWED_FILE_FORMATS}; found "{format}"' - ) + raise ValueError(f'Format argument to upload_table() must be in one ' + f'of {self.ALLOWED_FILE_FORMATS}; found "{format}"') # Create a temp directory in which we will let Parsons create a # file. Both will go away automatically when we leave scope. with tempfile.TemporaryDirectory() as temp_dir_name: - temp_file_path = temp_dir_name + "/table.tmp" - if format == "csv": + temp_file_path = temp_dir_name + '/table.tmp' + if format == 'csv': table.to_csv(local_path=temp_file_path) - elif format == "json": + elif format == 'json': table.to_json(local_path=temp_file_path) else: - raise SystemError( - f"Got (theoretically) impossible " f'format option "{format}"' - ) # pragma: no cover + raise SystemError(f'Got (theoretically) impossible ' + f'format option "{format}"') # pragma: no cover - new_file = self.client.folder(folder_id).upload( - file_path=temp_file_path, file_name=file_name - ) + new_file = self.client.folder(folder_id).upload(file_path=temp_file_path, + file_name=file_name) return new_file def download_file(self, path: str, local_path: str = None) -> str: @@ -289,12 +283,12 @@ def download_file(self, path: str, local_path: str = None) -> str: file_id = self.get_item_id(path) - with open(local_path, "wb") as output_file: + with open(local_path, 'wb') as output_file: self.client.file(file_id).download_to(output_file) return local_path - def get_table(self, path, format="csv") -> Table: + def get_table(self, path, format='csv') -> Table: """Get a table that has been saved to Box in csv or JSON format. 
`Args`: @@ -309,7 +303,7 @@ def get_table(self, path, format="csv") -> Table: file_id = self.get_item_id(path) return self.get_table_by_file_id(file_id=file_id, format=format) - def get_table_by_file_id(self, file_id, format="csv") -> Table: + def get_table_by_file_id(self, file_id, format='csv') -> Table: """Get a table that has been saved to Box in csv or JSON format. `Args`: @@ -322,25 +316,22 @@ def get_table_by_file_id(self, file_id, format="csv") -> Table: A Parsons Table. """ if format not in self.ALLOWED_FILE_FORMATS: - raise ValueError( - f"Format argument to upload_table() must be in one " - f'of {self.ALLOWED_FILE_FORMATS}; found "{format}"' - ) + raise ValueError(f'Format argument to upload_table() must be in one ' + f'of {self.ALLOWED_FILE_FORMATS}; found "{format}"') # Temp file will be around as long as enclosing process is running, # which we need, because the Table we return will continue to use it. output_file_name = create_temp_file() - with open(output_file_name, "wb") as output_file: + with open(output_file_name, 'wb') as output_file: self.client.file(file_id).download_to(output_file) - if format == "csv": + if format == 'csv': return Table.from_csv(output_file_name) - elif format == "json": + elif format == 'json': return Table.from_json(output_file_name) else: - raise SystemError( - f"Got (theoretically) impossible " f'format option "{format}"' - ) # pragma: no cover + raise SystemError(f'Got (theoretically) impossible ' + f'format option "{format}"') # pragma: no cover def get_item_id(self, path, base_folder_id=DEFAULT_FOLDER_ID) -> str: """Given a path-like object, try to return the id for the file or @@ -365,14 +356,14 @@ def get_item_id(self, path, base_folder_id=DEFAULT_FOLDER_ID) -> str: try: # Grab the leftmost element in the path - this is what we're # looking for in this folder. - if "/" in path: - this_element, path = path.split(sep="/", maxsplit=1) - if path == "": + if '/' in path: + this_element, path = path.split(sep='/', maxsplit=1) + if path == '': raise ValueError('Illegal trailing "/" in file path') else: this_element = path - path = "" + path = '' # Look in our current base_folder for an item whose name matches the # current element. If we're at initial, non-recursed call, base_folder @@ -392,7 +383,7 @@ def get_item_id(self, path, base_folder_id=DEFAULT_FOLDER_ID) -> str: # If there *are* more elements in the path, we need to check that this item is # in fact a folder so we can recurse and search inside it. 
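# (A sketch of how this recursive lookup is typically driven, with a
#  placeholder path:
#      file_id = box.get_item_id("reports/2020/summary.csv")
#      tbl = box.get_table_by_file_id(file_id, format="csv")
#  each path element resolves against one folder listing, so deep paths
#  generally mean more API calls.)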
- if item.type != "folder": + if item.type != 'folder': raise ValueError(f'Invalid folder "{this_element}"') return self.get_item_id(path=path, base_folder_id=item_id) diff --git a/parsons/braintree/__init__.py b/parsons/braintree/__init__.py index b1248c69e8..73d1cfff50 100644 --- a/parsons/braintree/__init__.py +++ b/parsons/braintree/__init__.py @@ -1,3 +1,5 @@ from parsons.braintree.braintree import Braintree -__all__ = ["Braintree"] +__all__ = [ + 'Braintree' +] diff --git a/parsons/braintree/braintree.py b/parsons/braintree/braintree.py index adf07d1dbf..3581c51ec8 100644 --- a/parsons/braintree/braintree.py +++ b/parsons/braintree/braintree.py @@ -36,172 +36,123 @@ class Braintree(object): """ query_types = { - "dispute": braintree.DisputeSearch, - "transaction": braintree.TransactionSearch, - "subscription": braintree.SubscriptionSearch, + 'dispute': braintree.DisputeSearch, + 'transaction': braintree.TransactionSearch, } credit_card_fields = [ - "bin", - "card_type", - "cardholder_name", - "commercial", - "country_of_issuance", - "customer_location", - "debit", - "durbin_regulated", - "expiration_month", - "expiration_year", - "healthcare", - "image_url", - "issuing_bank", - "last_4", - "payroll", - "prepaid", - "product_id", - "token", - "venmo_sdk", + 'bin', + 'card_type', + 'cardholder_name', + 'commercial', + 'country_of_issuance', + 'customer_location', + 'debit', + 'durbin_regulated', + 'expiration_month', + 'expiration_year', + 'healthcare', + 'image_url', + 'issuing_bank', + 'last_4', + 'payroll', + 'prepaid', + 'product_id', + 'token', + 'venmo_sdk', ] disbursement_fields = [ - "disbursement_date", # => disbursement_date column - "funds_held", - "settlement_amount", - "settlement_currency_exchange_rate", - "settlement_currency_iso_code", - "success", + 'disbursement_date', # => disbursement_date column + 'funds_held', + 'settlement_amount', + 'settlement_currency_exchange_rate', + 'settlement_currency_iso_code', + 'success', ] transaction_fields = [ - "additional_processor_response", - "amount", - "avs_error_response_code", - "avs_postal_code_response_code", - "avs_street_address_response_code", - "channel", - "created_at", - "currency_iso_code", - "cvv_response_code", - "discount_amount", - "escrow_status", - "gateway_rejection_reason", - "id", - "master_merchant_account_id", - "merchant_account_id", - "order_id", - "payment_instrument_type", - "plan_id", - "processor_authorization_code", - "processor_response_code", - "processor_response_text", - "processor_settlement_response_code", - "processor_settlement_response_text", - "purchase_order_number", - "recurring", - "refund_id", - "refunded_transaction_id", - "service_fee_amount", - "settlement_batch_id", - "shipping_amount", - "ships_from_postal_code", - "status", - "sub_merchant_account_id", - "subscription_id", - "tax_amount", - "tax_exempt", - "type", - "updated_at", - "voice_referral_number", + 'additional_processor_response', + 'amount', + 'avs_error_response_code', + 'avs_postal_code_response_code', + 'avs_street_address_response_code', + 'channel', + 'created_at', + 'currency_iso_code', + 'cvv_response_code', + 'discount_amount', + 'escrow_status', + 'gateway_rejection_reason', + 'id', + 'master_merchant_account_id', + 'merchant_account_id', + 'order_id', + 'payment_instrument_type', + 'plan_id', + 'processor_authorization_code', + 'processor_response_code', + 'processor_response_text', + 'processor_settlement_response_code', + 'processor_settlement_response_text', + 'purchase_order_number', + 'recurring', + 
'refund_id', + 'refunded_transaction_id', + 'service_fee_amount', + 'settlement_batch_id', + 'shipping_amount', + 'ships_from_postal_code', + 'status', + 'sub_merchant_account_id', + 'subscription_id', + 'tax_amount', + 'tax_exempt', + 'type', + 'updated_at', + 'voice_referral_number', ] dispute_fields = [ - "id", - "amount_disputed", - "amount_won", - "case_number", - "currency_iso_code", - "kind", - "merchant_account_id", - "original_dispute_id", - "processor_comments", - "reason", - "reason_code", - "reason_description", - "received_date", - "reference_number", - "reply_by_date", - "status", + 'id', + 'amount_disputed', + 'amount_won', + 'case_number', + 'currency_iso_code', + 'kind', + 'merchant_account_id', + 'original_dispute_id', + 'processor_comments', + 'reason', + 'reason_code', + 'reason_description', + 'received_date', + 'reference_number', + 'reply_by_date', + 'status', # 'transaction.id', # DOT id -- needs to be special-cased (below) ] - subscription_fields = [ - "add_ons", - "balance", - "billing_day_of_month", - "billing_period_end_date", - "billing_period_start_date", - "created_at", - "current_billing_cycle", - "days_past_due", - "description", - # 'descriptor', # covered under descriptor_fields - "discounts", - "failure_count", - "first_billing_date", - "id", - "merchant_account_id", - "never_expires", - "next_bill_amount", - "next_billing_date", - "next_billing_period_amount", - "number_of_billing_cycles", - "paid_through_date", - "payment_method_token", - "plan_id", - "price", - "status", - "status_history", - # 'transactions', # special-cased - "trial_duration", - "trial_duration_unit", - "trial_period", - "updated_at", - ] - - descriptor_fields = ["name", "phone", "url"] - - customer_fields = ["first_name", "last_name", "email"] - - def __init__( - self, - merchant_id=None, - public_key=None, - private_key=None, - timeout=None, - production=True, - ): - merchant_id = check_env("BRAINTREE_MERCHANT_ID", merchant_id) - public_key = check_env("BRAINTREE_PUBLIC_KEY", public_key) - private_key = check_env("BRAINTREE_PRIVATE_KEY", private_key) - timeout = check_env("BRAINTREE_TIMEOUT", timeout, optional=True) or 200 + def __init__(self, merchant_id=None, public_key=None, private_key=None, + timeout=None, production=True): + merchant_id = check_env('BRAINTREE_MERCHANT_ID', merchant_id) + public_key = check_env('BRAINTREE_PUBLIC_KEY', public_key) + private_key = check_env('BRAINTREE_PRIVATE_KEY', private_key) + timeout = check_env('BRAINTREE_TIMEOUT', timeout, optional=True) or 200 self.gateway = braintree.BraintreeGateway( braintree.Configuration( - environment=( - braintree.Environment.Production - if production - else braintree.Environment.Sandbox - ), + environment=(braintree.Environment.Production if production + else braintree.Environment.Sandbox), merchant_id=merchant_id, public_key=public_key, private_key=private_key, - timeout=timeout, - ) - ) + timeout=timeout)) - def get_disputes( - self, start_date=None, end_date=None, query_list=None, query_dict=None - ): + def get_disputes(self, + start_date=None, end_date=None, + query_list=None, + query_dict=None): """ Get a table of disputes based on query parameters. 
There are three ways to pass query arguments: Pass a start_date and end_date @@ -233,122 +184,29 @@ def get_disputes( Table Class """ collection = self._get_collection( - "dispute", + 'dispute', query_list=query_list, query_dict=query_dict, default_query=( - {"effective_date": dict(between=[start_date, end_date])} + {'effective_date': dict(between=[start_date, end_date])} if start_date and end_date else None - ), - ) + )) # Iterating on collection.items triggers web requests in batches of 50 records # Disputes query api doesn't return the ids -- we can't do anything but iterate if not collection.is_success: - raise ParsonsBraintreeError( - f"Braintree dispute query failed: {collection.message}" - ) - return Table( - [self._dispute_header()] - + [self._dispute_to_row(r) for r in collection.disputes.items] - ) - - def get_subscriptions( - self, - table_of_ids=None, - start_date=None, - end_date=None, - query_list=None, - query_dict=None, - include_transactions=False, - just_ids=False, - ): - """ - Get a table of subscriptions based on query parameters. - There are three ways to pass query arguments: - Pass a start_date and end_date together - for a date range, or pass a query_list or query_dict argument. - - `Args:` - start_date: date or str - Start date of the subscription range. Requires `end_date` arg. - e.g. '2020-11-03' - end_date: date or str - End date of the subscription range. Requires `start_date` arg. - e.g. '2020-11-03' - query_list: list of braintree.SubscriptionSearch - You can use the `braintree.SubscriptionSearch - `_ - to create a manual list of query parameters. - query_dict: jsonable-dict - query_dict is basically the same as query_list, except instead of using their API - objects, you can pass it in pure dictionary form. - Some examples: - .. highlight:: python - .. code-block:: python - - # The start_date/end_date arguments are the same as - {"created_at": {"between": [start_date, end_date]}} - # some other examples - {"merchant_account_id": {"in_list": [123, 456]}} - {"created_at": {"greater_than_or_equal": "2020-03-10"}} - include_transactions: bool - If this is true, include the full collection of transaction objects. - Otherwise, just return a list of transaction IDs. - just_ids: bool - While querying a list of subscription ids is a single, fast query to Braintree's - API, getting all data for each subscription is force-paginated at 50 records per - request. If you just need a count or the list of ids, then set `just_ids=True` and - it will return a single column with `id` instead of all table columns. - table_of_ids: Table with an `id` column -- i.e. a table returned from `just_ids=True` - Subsequently, after calling this with `just_ids`, you can prune/alter the ids table - and then pass the table back to get the full data. - These are somewhat-niche use-cases, but occasionally crucial - when a search result returns 1000s of ids.
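A rough sketch of that two-step workflow, for illustration only (the dates are hypothetical, and it assumes the ``BRAINTREE_*`` credentials are set in the environment):

.. code-block:: python

    bt = Braintree()
    # One fast request: a single `id` column for the date range
    ids_tbl = bt.get_subscriptions(
        start_date='2020-11-01', end_date='2020-11-30', just_ids=True)
    # Prune or alter ids_tbl as needed, then fetch full rows for just those ids
    full_tbl = bt.get_subscriptions(table_of_ids=ids_tbl)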
- `Returns:` - Table Class - """ - collection = self._get_collection( - "subscription", - table_of_ids=table_of_ids, - query_list=query_list, - query_dict=query_dict, - default_query=( - {"created_at": dict(between=[start_date, end_date])} - if start_date and end_date - else None - ), - ) - query_count = len(collection.ids) - logger.info( - f"Braintree subscriptions search resulted in subscriptions count of {query_count}" - ) - if just_ids: - return Table([("id",)] + [[item_id] for item_id in collection.ids]) - - # Iterating on collection.items triggers web requests in batches of 50 records - # This can be frustratingly slow :-( - # Also note: Braintree will push you to their new GraphQL API, - # but it, too, paginates with a max of 50 records - logger.debug("Braintree subscriptions iterating to build subscriptions table") - return Table( - [self._subscription_header(include_transactions)] - + [ - self._subscription_to_row(include_transactions, r) - for r in collection.items - ] - ) - - def get_transactions( - self, - table_of_ids=None, - disbursement_start_date=None, - disbursement_end_date=None, - query_list=None, - query_dict=None, - just_ids=False, - ): + raise ParsonsBraintreeError(f"Braintree dispute query failed: {collection.message}") + return Table([ + self._dispute_header() + ] + [self._dispute_to_row(r) for r in collection.disputes.items]) + + def get_transactions(self, + table_of_ids=None, + disbursement_start_date=None, disbursement_end_date=None, + query_list=None, + query_dict=None, + just_ids=False): """ Get a table of transactions based on query parameters. There are three ways to pass query arguments: @@ -392,39 +250,33 @@ def get_transactions( Table Class """ collection = self._get_collection( - "transaction", + 'transaction', table_of_ids=table_of_ids, query_list=query_list, query_dict=query_dict, default_query=( - { - "disbursement_date": dict( - between=[disbursement_start_date, disbursement_end_date] - ) - } + {'disbursement_date': dict( + between=[disbursement_start_date, disbursement_end_date])} if disbursement_start_date and disbursement_end_date else None - ), - ) + )) query_count = len(collection.ids) - logger.info( - f"Braintree transactions resulted in transaction count of {query_count}" - ) + logger.info(f'Braintree transactions resulted in transaction count of {query_count}') if just_ids: - return Table([("id",)] + [[item_id] for item_id in collection.ids]) + return Table([('id',)] + + [[item_id] for item_id in collection.ids]) # Iterating on collection.items triggers web requests in batches of 50 records # This can be frustratingly slow :-( # Also note: Braintree will push you to their new GraphQL API, # but it, too, paginates with a max of 50 records - logger.debug("Braintree transactions iterating to build transaction table") - return Table( - [self._transaction_header()] - + [self._transaction_to_row(r) for r in collection.items] - ) + logger.debug('Braintree transactions iterating to build transaction table') + return Table([ + self._transaction_header() + ] + [self._transaction_to_row(r) for r in collection.items]) def _dispute_header(self): - return self.dispute_fields + ["transaction_id"] + return self.dispute_fields + ['transaction_id'] def _dispute_to_row(self, collection_item): row = [getattr(collection_item, k) for k in self.dispute_fields] @@ -434,73 +286,25 @@ def _dispute_to_row(self, collection_item): def _transaction_header(self): return ( - [f"credit_card_{k}" for k in self.credit_card_fields] + [f'credit_card_{k}' for k in 
self.credit_card_fields] # annoying exception in column name - + [ - (f"disbursement_{k}" if k != "disbursement_date" else k) - for k in self.disbursement_fields - ] - + [f"customer_{k}" for k in self.customer_fields] - + self.transaction_fields - ) + + [(f'disbursement_{k}' if k != 'disbursement_date' else k) + for k in self.disbursement_fields] + + self.transaction_fields) def _transaction_to_row(self, collection_item): return ( - [ - ( - collection_item.credit_card.get(k) - if getattr(collection_item, "credit_card", None) - else None - ) - for k in self.credit_card_fields - ] - + [ - getattr(collection_item.disbursement_details, k) - for k in self.disbursement_fields - ] - + [ - getattr(collection_item.customer_details, k) - for k in self.customer_fields - ] - + [getattr(collection_item, k) for k in self.transaction_fields] - ) - - def _subscription_header(self, include_transactions): - if include_transactions: - return ( - [f"descriptor_{k}" for k in self.descriptor_fields] - + self.subscription_fields - + ["transactions"] - ) - else: - return ( - [f"descriptor_{k}" for k in self.descriptor_fields] - + self.subscription_fields - + ["transaction_ids"] - ) - - def _subscription_to_row(self, include_transactions, collection_item): - if include_transactions: - return ( - [getattr(collection_item.descriptor, k) for k in self.descriptor_fields] - + [getattr(collection_item, k) for k in self.subscription_fields] - + [collection_item.transactions] - ) - else: - return ( - [getattr(collection_item.descriptor, k) for k in self.descriptor_fields] - + [getattr(collection_item, k) for k in self.subscription_fields] - + [";".join(t.id for t in collection_item.transactions)] - ) - - def _get_collection( - self, - query_type, - table_of_ids=None, - query_list=None, - query_dict=None, - default_query=None, - ): + [(collection_item.credit_card.get(k) + if getattr(collection_item, 'credit_card', None) else None) + for k in self.credit_card_fields] + + [getattr(collection_item.disbursement_details, k) for k in self.disbursement_fields] + + [getattr(collection_item, k) for k in self.transaction_fields]) + + def _get_collection(self, query_type, + table_of_ids=None, + query_list=None, + query_dict=None, + default_query=None): collection_query = None collection = None if query_list: @@ -509,17 +313,15 @@ def _get_collection( collection_query = self._get_query_objects(query_type, **query_dict) elif default_query: collection_query = self._get_query_objects(query_type, **default_query) + if not collection_query: raise ParsonsBraintreeError( "You must pass some query parameters: " - "query_dict, start_date with end_date, or query_list" - ) + "query_dict, start_date with end_date, or query_list") if table_of_ids: # We don't need to re-do the query, we can just reconstruct the query object - collection = self._create_collection( - query_type, table_of_ids.table.values("id"), collection_query - ) + collection = self._create_collection(table_of_ids.table.values('id'), collection_query) else: collection = getattr(self.gateway, query_type).search(*collection_query) return collection @@ -541,9 +343,7 @@ def _get_query_objects(self, query_type, **queryparams): for qual, vals in filters.items(): # likely only one, but fine queryobj_qualfunc = getattr(queryobj, qual, None) if not queryobj_qualfunc: - raise ParsonsBraintreeError( - "oh no, that's not a braintree parameter" - ) + raise ParsonsBraintreeError("oh no, that's not a braintree parameter") if not isinstance(vals, list): vals = [vals] 
queries.append(queryobj_qualfunc(*vals)) @@ -551,18 +351,9 @@ def _get_query_objects(self, query_type, **queryparams): raise ParsonsBraintreeError("oh no, that's not a braintree parameter") return queries - def _create_collection(self, query_type, ids, queries): - if (query_type == "transaction") or (query_type == "disbursement"): - gateway = braintree.TransactionGateway(self.gateway) - return braintree.ResourceCollection( - queries, - {"search_results": {"ids": list(ids), "page_size": 50}}, - method=gateway._TransactionGateway__fetch, - ) - if query_type == "subscription": - gateway = braintree.SubscriptionGateway(self.gateway) - return braintree.ResourceCollection( - queries, - {"search_results": {"ids": list(ids), "page_size": 50}}, - method=gateway._SubscriptionGateway__fetch, - ) + def _create_collection(self, ids, queries): + transaction_gateway = braintree.TransactionGateway(self.gateway) + return braintree.ResourceCollection( + queries, + {'search_results': {'ids': list(ids), 'page_size': 50}}, + method=transaction_gateway._TransactionGateway__fetch) diff --git a/parsons/capitol_canary/__init__.py b/parsons/capitol_canary/__init__.py deleted file mode 100644 index d1a39596cb..0000000000 --- a/parsons/capitol_canary/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from parsons.capitol_canary.capitol_canary import CapitolCanary - -__all__ = ["CapitolCanary"] diff --git a/parsons/capitol_canary/capitol_canary.py b/parsons/capitol_canary/capitol_canary.py deleted file mode 100644 index cf08fb94ff..0000000000 --- a/parsons/capitol_canary/capitol_canary.py +++ /dev/null @@ -1,396 +0,0 @@ -from requests.auth import HTTPBasicAuth -from parsons.etl import Table -from parsons.utilities import check_env -from parsons.utilities.api_connector import APIConnector -from parsons.utilities.datetime import date_to_timestamp -import logging - -logger = logging.getLogger(__name__) - -CAPITOL_CANARY_URI = "https://api.phone2action.com/2.0/" - - -class CapitolCanary(object): - """ - Instantiate CapitolCanary Class - - `Args:` - app_id: str - The CapitolCanary provided application id. Not required if ``CAPITOLCANARY_APP_ID`` - env variable set. - app_key: str - The CapitolCanary provided application key. Not required if ``CAPITOLCANARY_APP_KEY`` - env variable set. - `Returns:` - CapitolCanary Class - """ - - def __init__(self, app_id=None, app_key=None): - # check first for CapitolCanary branded app key and ID - cc_app_id = check_env.check("CAPITOLCANARY_APP_ID", None, optional=True) - cc_app_key = check_env.check("CAPITOLCANARY_APP_KEY", None, optional=True) - - self.app_id = cc_app_id or check_env.check("PHONE2ACTION_APP_ID", app_id) - self.app_key = cc_app_key or check_env.check("PHONE2ACTION_APP_KEY", app_key) - self.auth = HTTPBasicAuth(self.app_id, self.app_key) - self.client = APIConnector(CAPITOL_CANARY_URI, auth=self.auth) - - def _paginate_request(self, url, args=None, page=None): - # Internal pagination method - - if page is not None: - args["page"] = page - - r = self.client.get_request(url, params=args) - - json = r["data"] - - if page is not None: - return json - - # If count of items is less than the total allowed per page, paginate - while r["pagination"]["count"] == r["pagination"]["per_page"]: - - r = self.client.get_request(r["pagination"]["next_url"], args) - json.extend(r["data"]) - - return json - - def get_advocates( - self, state=None, campaign_id=None, updated_since=None, page=None - ): - """ - Return advocates (person records). 
- - If no page is specified, the method will automatically paginate through the available - advocates. - - `Args:` - state: str - Filter by US postal abbreviation for a state - or territory e.g., "CA" "NY" or "DC" - campaign_id: int - Filter to specific campaign - updated_since: str or int or datetime - Fetch all advocates updated since the date provided; this can be a datetime - object, a UNIX timestamp, or a date string (ex. '2014-01-05 23:59:43') - page: int - Page number of data to fetch; if this is specified, the call will only return one - page. - `Returns:` - A dict of parsons tables: - * emails - * phones - * memberships - * tags - * ids - * fields - * advocates - """ - - # Convert the passed in updated_since into a Unix timestamp (which is what the API wants) - updated_since = date_to_timestamp(updated_since) - - args = { - "state": state, - "campaignid": campaign_id, - "updatedSince": updated_since, - } - - logger.info("Retrieving advocates...") - json = self._paginate_request("advocates", args=args, page=page) - - return self._advocates_tables(Table(json)) - - def _advocates_tables(self, tbl): - # Convert the advocates nested table into multiple tables - - tbls = { - "advocates": tbl, - "emails": Table(), - "phones": Table(), - "memberships": Table(), - "tags": Table(), - "ids": Table(), - "fields": Table(), - } - - if not tbl: - return tbls - - logger.info(f"Retrieved {tbl.num_rows} advocates...") - - # Unpack all of the single objects - # The CapitolCanary API docs say that created_at and updated_at are dictionaries, but - # the data returned from the server is an ISO8601 timestamp. - EHS, 05/21/2020 - for c in ["address", "districts"]: - tbl.unpack_dict(c) - - # Unpack all of the arrays - child_tables = [child for child in tbls.keys() if child != "advocates"] - for c in child_tables: - tbls[c] = tbl.long_table(["id"], c, key_rename={"id": "advocate_id"}) - - return tbls - - def get_campaigns( - self, - state=None, - zip=None, - include_generic=False, - include_private=False, - include_content=True, - ): - """ - Returns a list of campaigns - - `Args:` - state: str - Filter by US postal abbreviation for a state or territory e.g., "CA" "NY" or "DC" - zip: int - Filter by 5 digit zip code - include_generic: boolean - When filtering by state or ZIP code, include unrestricted campaigns - include_private: boolean - If true, will include private campaigns in results - include_content: boolean - If true, include campaign content fields, which may vary. This may cause - sync errors. - `Returns:` - Parsons Table - See :ref:`parsons-table` for output options. - """ - - args = { - "state": state, - "zip": zip, - "includeGeneric": str(include_generic), - "includePrivate": str(include_private), - } - - tbl = Table(self.client.get_request("campaigns", params=args)) - if tbl: - tbl.unpack_dict("updated_at") - if include_content: - tbl.unpack_dict("content") - - return tbl - - def create_advocate( - self, - campaigns, - first_name=None, - last_name=None, - email=None, - phone=None, - address1=None, - address2=None, - city=None, - state=None, - zip5=None, - sms_optin=None, - email_optin=None, - sms_optout=None, - email_optout=None, - **kwargs, - ): - """ - Create an advocate. - - If you want to opt an advocate into or out of SMS / email campaigns, you must provide - the email address or phone number (accordingly). - - The list of arguments only partially covers the fields that can be set on the advocate.
- For a complete list of fields that can be updated, see - `the CapitolCanary API documentation `_. - - `Args:` - campaigns: list - The ID(s) of campaigns to add the advocate to - first_name: str - `Optional`; The first name of the advocate - last_name: str - `Optional`; The last name of the advocate - email: str - `Optional`; An email address to add for the advocate. One of ``email`` or ``phone`` - is required. - phone: str - `Optional`; A phone # to add for the advocate. One of ``email`` or ``phone`` is - required. - address1: str - `Optional`; The first line of the advocate's address - address2: str - `Optional`; The second line of the advocate's address - city: str - `Optional`; The city of the advocate's address - state: str - `Optional`; The state of the advocate's address - zip5: str - `Optional`; The 5 digit Zip code of the advocate - sms_optin: boolean - `Optional`; Whether to opt the advocate into receiving text messages; an SMS - confirmation text message will be sent. You must provide values for the ``phone`` - and ``campaigns`` arguments. - email_optin: boolean - `Optional`; Whether to opt the advocate into receiving emails. You must provide - values for the ``email`` and ``campaigns`` arguments. - sms_optout: boolean - `Optional`; Whether to opt the advocate out of receiving text messages. You must - provide values for the ``phone`` and ``campaigns`` arguments. Once an advocate is - opted out, they cannot be opted back in. - email_optout: boolean - `Optional`; Whether to opt the advocate out of receiving emails. You must - provide values for the ``email`` and ``campaigns`` arguments. Once an advocate is - opted out, they cannot be opted back in. - **kwargs: - Additional fields on the advocate to update - `Returns:` - The int ID of the created advocate. - """ - - # Validate the passed in arguments - - if not campaigns: - raise ValueError( - "When creating an advocate, you must specify one or more campaigns." - ) - - if not email and not phone: - raise ValueError( - "When creating an advocate, you must provide an email address or a phone number."
- ) - - if (sms_optin or sms_optout) and not phone: - raise ValueError( - "When opting an advocate in or out of SMS messages, you must specify a valid " - "phone and one or more campaigns" - ) - - if (email_optin or email_optout) and not email: - raise ValueError( - "When opting an advocate in or out of email messages, you must specify a valid " - "email address and one or more campaigns" - ) - - # Align our arguments with the expected parameters for the API - payload = { - "email": email, - "phone": phone, - "firstname": first_name, - "lastname": last_name, - "address1": address1, - "address2": address2, - "city": city, - "state": state, - "zip5": zip5, - "smsOptin": 1 if sms_optin else None, - "emailOptin": 1 if email_optin else None, - "smsOptout": 1 if sms_optout else None, - "emailOptout": 1 if email_optout else None, - } - - # Clean up any keys that have a "None" value - payload = {key: val for key, val in payload.items() if val is not None} - - # Merge in any kwargs - payload.update(kwargs) - - # Turn into a list of items so we can append multiple campaigns - campaign_keys = [("campaigns[]", val) for val in campaigns] - data = [(key, value) for key, value in payload.items()] + campaign_keys - - # Call into the CapitolCanary API - response = self.client.post_request("advocates", data=data) - return response["advocateid"] - - def update_advocate( - self, - advocate_id, - campaigns=None, - email=None, - phone=None, - sms_optin=None, - email_optin=None, - sms_optout=None, - email_optout=None, - **kwargs, - ): - """ - Update the fields of an advocate. - - If you want to opt an advocate into or out of SMS / email campaigns, you must provide - the email address or phone number along with a list of campaigns. - - The list of arguments only partially covers the fields that can be updated on the advocate. - For a complete list of fields that can be updated, see - `the CapitolCanary API documentation `_. - - `Args:` - advocate_id: integer - The ID of the advocate being updated - campaigns: list - `Optional`; The ID(s) of campaigns to add the user to - email: str - `Optional`; An email address to add for the advocate (or to use when opting in/out) - phone: str - `Optional`; A phone # to add for the advocate (or to use when opting in/out) - sms_optin: boolean - `Optional`; Whether to opt the advocate into receiving text messages; an SMS - confirmation text message will be sent. You must provide values for the ``phone`` - and ``campaigns`` arguments. - email_optin: boolean - `Optional`; Whether to opt the advocate into receiving emails. You must provide - values for the ``email`` and ``campaigns`` arguments. - sms_optout: boolean - `Optional`; Whether to opt the advocate out of receiving text messages. You must - provide values for the ``phone`` and ``campaigns`` arguments. Once an advocate is - opted out, they cannot be opted back in. - email_optout: boolean - `Optional`; Whether to opt the advocate out of receiving emails. You must - provide values for the ``email`` and ``campaigns`` arguments. Once an advocate is - opted out, they cannot be opted back in.
- **kwargs: - Additional fields on the advocate to update - """ - - # Validate the passed in arguments - if (sms_optin or sms_optout) and not (phone and campaigns): - raise ValueError( - "When opting an advocate in or out of SMS messages, you must specify a valid " - "phone and one or more campaigns" - ) - - if (email_optin or email_optout) and not (email and campaigns): - raise ValueError( - "When opting an advocate in or out of email messages, you must specify a valid " - "email address and one or more campaigns" - ) - - # Align our arguments with the expected parameters for the API - payload = { - "advocateid": advocate_id, - "campaigns": campaigns, - "email": email, - "phone": phone, - "smsOptin": 1 if sms_optin else None, - "emailOptin": 1 if email_optin else None, - "smsOptout": 1 if sms_optout else None, - "emailOptout": 1 if email_optout else None, - # remap first_name / last_name to be consistent with updated_advocates - "firstname": kwargs.pop("first_name", None), - "lastname": kwargs.pop("last_name", None), - } - - # Clean up any keys that have a "None" value - payload = {key: val for key, val in payload.items() if val is not None} - - # Merge in any kwargs - payload.update(kwargs) - - # Turn into a list of items so we can append multiple campaigns - campaigns = campaigns or [] - campaign_keys = [("campaigns[]", val) for val in campaigns] - data = [(key, value) for key, value in payload.items()] + campaign_keys - - # Call into the CapitolCanary API - self.client.post_request("advocates", data=data) diff --git a/parsons/civis/__init__.py b/parsons/civis/__init__.py index 16b9cd291f..cfbc19b2ba 100644 --- a/parsons/civis/__init__.py +++ b/parsons/civis/__init__.py @@ -1,3 +1,5 @@ from parsons.civis.civisclient import CivisClient -__all__ = ["CivisClient"] +__all__ = [ + 'CivisClient' +] diff --git a/parsons/civis/civisclient.py b/parsons/civis/civisclient.py index 41519182a4..21e8241abe 100644 --- a/parsons/civis/civisclient.py +++ b/parsons/civis/civisclient.py @@ -21,8 +21,8 @@ class CivisClient(object): def __init__(self, db=None, api_key=None, **kwargs): - self.db = check_env.check("CIVIS_DATABASE", db) - self.api_key = check_env.check("CIVIS_API_KEY", api_key) + self.db = check_env.check('CIVIS_DATABASE', db) + self.api_key = check_env.check('CIVIS_API_KEY', api_key) self.client = civis.APIClient(api_key=api_key, **kwargs) """ The Civis API client. Utilize this attribute to access lower level and more @@ -30,9 +30,7 @@ def __init__(self, db=None, api_key=None, **kwargs): can be found by reading the Civis API client `documentation `_. """ # noqa: E501 - def query( - self, sql, preview_rows=10, polling_interval=None, hidden=True, wait=True - ): + def query(self, sql, preview_rows=10, polling_interval=None, hidden=True, wait=True): """ Execute a SQL statement as a Civis query. @@ -58,39 +56,24 @@ def query( See :ref:`parsons-table` for output options.
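A minimal usage sketch (the SQL is hypothetical; assumes ``CIVIS_DATABASE`` and ``CIVIS_API_KEY`` are set in the environment):

.. code-block:: python

    civis_client = CivisClient()
    tbl = civis_client.query('SELECT 1 AS one')
    # query() returns None when the statement produces no rows
    if tbl is not None:
        print(tbl.num_rows)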
""" - fut = civis.io.query_civis( - sql, - self.db, - preview_rows=preview_rows, - polling_interval=polling_interval, - hidden=hidden, - ) + fut = civis.io.query_civis(sql, self.db, preview_rows=preview_rows, + polling_interval=polling_interval, hidden=hidden) if not wait: return fut result = fut.result() - if result["result_rows"] is None: + if result['result_rows'] is None: return None - result["result_rows"].insert(0, result["result_columns"]) - - return Table(result["result_rows"]) - - def table_import( - self, - table_obj, - table, - max_errors=None, - existing_table_rows="fail", - diststyle=None, - distkey=None, - sortkey1=None, - sortkey2=None, - wait=True, - **civisargs - ): + result['result_rows'].insert(0, result['result_columns']) + + return Table(result['result_rows']) + + def table_import(self, table_obj, table, max_errors=None, + existing_table_rows='fail', diststyle=None, distkey=None, + sortkey1=None, sortkey2=None, wait=True, **civisargs): """ Write the table to a Civis Redshift cluster. Additional key word arguments can passed to `civis.io.dataframe_to_civis() `_ # noqa: E501 @@ -124,19 +107,12 @@ def table_import( ``None`` or ``civis.CivisFuture`` """ # noqa: E501,E261 - fut = civis.io.dataframe_to_civis( - table_obj.to_dataframe(), - database=self.db, - table=table, - max_errors=max_errors, - existing_table_rows=existing_table_rows, - diststyle=diststyle, - distkey=distkey, - sortkey1=sortkey1, - sortkey2=sortkey2, - headers=True, - **civisargs - ) + fut = civis.io.dataframe_to_civis(table_obj.to_dataframe(), database=self.db, + table=table, max_errors=max_errors, + existing_table_rows=existing_table_rows, + diststyle=diststyle, distkey=distkey, + sortkey1=sortkey1, sortkey2=sortkey2, + headers=True, **civisargs) if wait: return fut.result() diff --git a/parsons/controlshift/__init__.py b/parsons/controlshift/__init__.py index 20dabcdcf6..060df3b129 100644 --- a/parsons/controlshift/__init__.py +++ b/parsons/controlshift/__init__.py @@ -1,3 +1,5 @@ from parsons.controlshift.controlshift import Controlshift -__all__ = ["Controlshift"] +__all__ = [ + 'Controlshift' +] diff --git a/parsons/controlshift/controlshift.py b/parsons/controlshift/controlshift.py index 51608c6770..f4ce3a8bfc 100644 --- a/parsons/controlshift/controlshift.py +++ b/parsons/controlshift/controlshift.py @@ -26,14 +26,14 @@ class Controlshift(object): def __init__(self, hostname=None, client_id=None, client_secret=None): - self.hostname = check_env.check("CONTROLSHIFT_HOSTNAME", hostname) - token_url = f"{self.hostname}/oauth/token" + self.hostname = check_env.check('CONTROLSHIFT_HOSTNAME', hostname) + token_url = f'{self.hostname}/oauth/token' self.client = OAuth2APIConnector( self.hostname, - client_id=check_env.check("CONTROLSHIFT_CLIENT_ID", client_id), - client_secret=check_env.check("CONTROLSHIFT_CLIENT_SECRET", client_secret), + client_id=check_env.check('CONTROLSHIFT_CLIENT_ID', client_id), + client_secret=check_env.check('CONTROLSHIFT_CLIENT_SECRET', client_secret), token_url=token_url, - auto_refresh_url=token_url, + auto_refresh_url=token_url ) def get_petitions(self) -> Table: @@ -48,10 +48,9 @@ def get_petitions(self) -> Table: petitions = [] while next_page: response = self.client.get_request( - f"{self.hostname}/api/v1/petitions", {"page": next_page} - ) - next_page = response["meta"]["next_page"] - petitions.extend(response["petitions"]) + f'{self.hostname}/api/v1/petitions', {'page': next_page}) + next_page = response['meta']['next_page'] + petitions.extend(response['petitions']) 
return Table(petitions) diff --git a/parsons/copper/__init__.py b/parsons/copper/__init__.py index 933eba4e72..5108d125bd 100644 --- a/parsons/copper/__init__.py +++ b/parsons/copper/__init__.py @@ -1,3 +1,5 @@ from parsons.copper.copper import Copper -__all__ = ["Copper"] +__all__ = [ + 'Copper' +] diff --git a/parsons/copper/copper.py b/parsons/copper/copper.py index b81aa23e83..45ddb77334 100644 --- a/parsons/copper/copper.py +++ b/parsons/copper/copper.py @@ -28,8 +28,8 @@ class Copper(object): def __init__(self, user_email=None, api_key=None): - self.api_key = check_env.check("COPPER_API_KEY", api_key) - self.user_email = check_env.check("COPPER_USER_EMAIL", user_email) + self.api_key = check_env.check('COPPER_API_KEY', api_key) + self.user_email = check_env.check('COPPER_USER_EMAIL', user_email) self.uri = COPPER_URI def base_request(self, endpoint, req_type, page=1, page_size=200, filters=None): @@ -39,10 +39,10 @@ def base_request(self, endpoint, req_type, page=1, page_size=200, filters=None): # Authentication must be done through headers, requests HTTPBasicAuth doesn't work headers = { - "X-PW-AccessToken": self.api_key, - "X-PW-Application": "developer_api", - "X-PW-UserEmail": self.user_email, - "Content-Type": "application/json", + 'X-PW-AccessToken': self.api_key, + 'X-PW-Application': "developer_api", + 'X-PW-UserEmail': self.user_email, + 'Content-Type': "application/json" } payload = {} @@ -51,7 +51,7 @@ def base_request(self, endpoint, req_type, page=1, page_size=200, filters=None): payload.update(filters) # GET request with non-None data arg is malformed - if req_type == "GET": + if req_type == 'GET': return request(req_type, url, params=json.dumps(payload), headers=headers) else: payload["page_number"] = page @@ -69,11 +69,11 @@ def paginate_request(self, endpoint, req_type, page_size=200, filters=None): if isinstance(filters, dict): # Assume user wants just that page if page_number specified in filters - if "page_number" in filters: - page = filters["page_number"] + if 'page_number' in filters: + page = filters['page_number'] # Ensure exactly one loop total_pages = page - rows = f"{str(page_size)} or less" + rows = f'{str(page_size)} or less' only_page = True else: filters = {} @@ -81,15 +81,19 @@ def paginate_request(self, endpoint, req_type, page_size=200, filters=None): while page <= total_pages: r = self.base_request( - endpoint, req_type, page_size=page_size, page=page, filters=filters + endpoint, + req_type, + page_size=page_size, + page=page, + filters=filters ) if page == 1: - if "X-Pw-Total" in r.headers and not only_page: - rows = r.headers["X-Pw-Total"] - total_pages = int(math.ceil(int(rows) / float(page_size))) + if 'X-Pw-Total' in r.headers and not only_page: + rows = r.headers['X-Pw-Total'] + total_pages = int(math.ceil(int(rows)/float(page_size))) else: - rows = f"{str(page_size)} or less" + rows = f'{str(page_size)} or less' total_pages = 1 logger.info(f"Retrieving page {page} of {total_pages}, total rows: {rows}") page += 1 @@ -128,7 +132,7 @@ def get_people(self, filters=None, tidy=False): * people_custom_fields * people_socials * people_websites - """ # noqa: E501,E261 + """ # noqa: E501,E261 return self.get_standard_object("people", filters=filters, tidy=tidy) @@ -153,7 +157,7 @@ def get_companies(self, filters=None, tidy=False): * companies_custom_fields * companies_socials * companies_websites - """ # noqa: E501,E261 + """ # noqa: E501,E261 return self.get_standard_object("companies", filters=filters, tidy=tidy) @@ -173,7 +177,7 @@ def 
get_activities(self, filters=None, tidy=False): `Returns:` List of dicts of Parsons Tables: * activities - """ # noqa: E501,E261 + """ # noqa: E501,E261 return self.get_standard_object("activities", filters=filters, tidy=tidy) @@ -194,7 +198,7 @@ def get_opportunities(self, filters=None, tidy=False): List of dicts of Parsons Tables: * opportunities * opportunities_custom_fields - """ # noqa: E501,E261 + """ # noqa: E501,E261 return self.get_standard_object("opportunities", filters=filters, tidy=tidy) @@ -202,9 +206,7 @@ def get_standard_object(self, object_name, filters=None, tidy=False): # Retrieve and process a standard endpoint object (e.g. people, companies, etc.) logger.info(f"Retrieving {object_name} records.") - blob = self.paginate_request( - f"/{object_name}/search", req_type="POST", filters=filters - ) + blob = self.paginate_request(f"/{object_name}/search", req_type='POST', filters=filters) return self.process_json(blob, object_name, tidy=tidy) @@ -222,10 +224,10 @@ def get_custom_fields(self): * custom_fields * custom_fields_available * custom_fields_options - """ # noqa: E501,E261 + """ # noqa: E501,E261 logger.info("Retrieving custom fields.") - blob = self.paginate_request("/custom_field_definitions/", req_type="GET") + blob = self.paginate_request('/custom_field_definitions/', req_type='GET') return self.process_custom_fields(blob) def get_activity_types(self): @@ -240,17 +242,17 @@ def get_activity_types(self): `Returns:` List of dicts of Parsons Tables: * activity_types - """ # noqa: E501,E261 + """ # noqa: E501,E261 logger.info("Retrieving activity types.") - response = self.paginate_request("/activity_types/", req_type="GET") + response = self.paginate_request('/activity_types/', req_type='GET') orig_table = Table(response) - at_user = orig_table.long_table([], "user", prepend=False) - at_sys = orig_table.long_table([], "system", prepend=False) + at_user = orig_table.long_table([], 'user', prepend=False) + at_sys = orig_table.long_table([], 'system', prepend=False) Table.concat(at_sys, at_user) - return [{"name": "activity_types", "tbl": at_sys}] + return [{'name': 'activity_types', 'tbl': at_sys}] def get_contact_types(self): @@ -264,9 +266,9 @@ def get_contact_types(self): `Returns:` Parsons Table See :ref:`parsons-table` for output options.
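For orientation, the object getters above (``get_people``, ``get_companies``, ``get_activities``, ``get_opportunities``) return a list of ``{'name': ..., 'tbl': ...}`` dicts rather than a single table. A rough sketch, not part of the original code (assumes ``COPPER_API_KEY`` and ``COPPER_USER_EMAIL`` are set; the filter is hypothetical):

.. code-block:: python

    copper = Copper()
    # A page_number in filters limits the call to that single page (see paginate_request)
    for obj in copper.get_people(filters={'page_number': 1}):
        print(obj['name'], obj['tbl'].num_rows)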
- """ # noqa: E501,E261 + """ # noqa: E501,E261 - response = self.paginate_request("/contact_types/", req_type="GET") + response = self.paginate_request('/contact_types/', req_type='GET') return Table(response) def process_json(self, json_blob, obj_type, tidy=False): @@ -278,8 +280,8 @@ def process_json(self, json_blob, obj_type, tidy=False): # Original table & columns obj_table = Table(json_blob) cols = obj_table.get_columns_type_stats() - list_cols = [x["name"] for x in cols if "list" in x["type"]] - dict_cols = [x["name"] for x in cols if "dict" in x["type"]] + list_cols = [x['name'] for x in cols if 'list' in x['type']] + dict_cols = [x['name'] for x in cols if 'dict' in x['type']] # Unpack all list columns if len(list_cols) > 0: @@ -291,49 +293,46 @@ def process_json(self, json_blob, obj_type, tidy=False): ) # Add separate long table for each column with nested data if list_rows.num_rows > 0: - logger.debug(l, "is a nested column") - if len([x for x in cols if x["name"] == l]) == 1: - table_list.append( - { - "name": f"{obj_type}_{l}", - "tbl": obj_table.long_table(["id"], l), - } - ) + logger.debug(l, 'is a nested column') + if len([x for x in cols if x['name'] == l]) == 1: + table_list.append({ + 'name': f'{obj_type}_{l}', + 'tbl': obj_table.long_table(['id'], l) + }) else: # Ignore if column doesn't exist (or has multiples) continue else: if tidy is False: - logger.debug(l, "is a normal list column") + logger.debug(l, 'is a normal list column') obj_table.unpack_list(l) # Unpack all dict columns if len(dict_cols) > 0 and tidy is False: for d in dict_cols: - logger.debug(d, "is a dict column") + logger.debug(d, 'is a dict column') obj_table.unpack_dict(d) if tidy is not False: packed_cols = list_cols + dict_cols for p in packed_cols: if p in obj_table.columns: - logger.debug(p, "needs to be unpacked into rows") + logger.debug(p, 'needs to be unpacked into rows') # Determine whether or not to expand based on tidy - unpacked_tidy = obj_table.unpack_nested_columns_as_rows( - p, expand_original=tidy - ) + unpacked_tidy = obj_table.unpack_nested_columns_as_rows(p, expand_original=tidy) # Check if column was removed as sign it was unpacked into separate table if p not in obj_table.columns: - table_list.append( - {"name": f"{obj_type}_{p}", "tbl": unpacked_tidy} - ) + table_list.append({ + 'name': f'{obj_type}_{p}', + 'tbl': unpacked_tidy + }) else: obj_table = unpacked_tidy # Original table will have had all nested columns removed if len(obj_table.columns) > 1: - table_list.append({"name": obj_type, "tbl": obj_table}) + table_list.append({'name': obj_type, 'tbl': obj_table}) return table_list @@ -344,13 +343,11 @@ def process_custom_fields(self, json_blob): custom_fields = Table(json_blob) # Available On - available_on = custom_fields.long_table(["id"], "available_on") + available_on = custom_fields.long_table(['id'], 'available_on') # Options - options = custom_fields.long_table(["id", "name"], "options") + options = custom_fields.long_table(['id', 'name'], 'options') - return [ - {"name": "custom_fields", "tbl": custom_fields}, - {"name": "custom_fields_available", "tbl": available_on}, - {"name": "custom_fields_options", "tbl": options}, - ] + return [{'name': 'custom_fields', 'tbl': custom_fields}, + {'name': 'custom_fields_available', 'tbl': available_on}, + {'name': 'custom_fields_options', 'tbl': options}] diff --git a/parsons/crowdtangle/__init__.py b/parsons/crowdtangle/__init__.py index 20bc42b946..10e1b434fc 100644 --- a/parsons/crowdtangle/__init__.py +++ 
b/parsons/crowdtangle/__init__.py @@ -1,3 +1,5 @@ from parsons.crowdtangle.crowdtangle import CrowdTangle -__all__ = ["CrowdTangle"] +__all__ = [ + 'CrowdTangle' +] diff --git a/parsons/crowdtangle/crowdtangle.py b/parsons/crowdtangle/crowdtangle.py index 1f5841c9d7..4c0fefd15c 100644 --- a/parsons/crowdtangle/crowdtangle.py +++ b/parsons/crowdtangle/crowdtangle.py @@ -25,29 +25,30 @@ class CrowdTangle(object): def __init__(self, api_key=None): - self.api_key = check_env.check("CT_API_KEY", api_key) + self.api_key = check_env.check('CT_API_KEY', api_key) self.uri = CT_URI - def _base_request(self, endpoint, req_type="GET", args=None): + def _base_request(self, endpoint, req_type='GET', args=None): - url = f"{self.uri}/{endpoint}" - base_args = {"token": self.api_key, "count": PAGE_SIZE} + url = f'{self.uri}/{endpoint}' + base_args = {'token': self.api_key, + 'count': PAGE_SIZE} # Add any args passed through to the base args if args is not None: base_args.update(args) r = request(req_type, url, params=base_args).json() - json = r["result"] + json = r['result'] keys = list(json.keys()) data = json[keys[0]] - while "nextPage" in list(json["pagination"].keys()): + while 'nextPage' in list(json['pagination'].keys()): logger.info(f"Retrieving {PAGE_SIZE} rows.") time.sleep(REQUEST_SLEEP) - next_url = json["pagination"]["nextPage"] + next_url = json['pagination']['nextPage'] r = request(req_type, next_url).json() - json = r["result"] + json = r['result'] data.extend(json[keys[0]]) logger.info(f"Retrieved {len(data)} rows.") @@ -57,7 +58,7 @@ def _base_request(self, endpoint, req_type="GET", args=None): def _base_unpack(self, ParsonsTable): logger.debug("Working to unpack the Parsons Table...") - logger.debug(f"Starting with {len(ParsonsTable.columns)} columns...") + logger.debug(f'Starting with {len(ParsonsTable.columns)} columns...') sample = ParsonsTable[0] col_dict = {} @@ -70,7 +71,7 @@ def _base_unpack(self, ParsonsTable): elif col_dict[x] == "": ParsonsTable.unpack_list(col) - logger.info(f"There are now {len(ParsonsTable.columns)} columns...") + logger.info(f'There are now {len(ParsonsTable.columns)} columns...') return ParsonsTable def _unpack(self, ParsonsTable): @@ -91,20 +92,12 @@ def _unpack(self, ParsonsTable): def _list_to_string(self, list_arg): if list_arg: - return ",".join(list_arg) + return ','.join(list_arg) else: return None - def get_posts( - self, - start_date=None, - end_date=None, - language=None, - list_ids=None, - min_interations=None, - search_term=None, - types=None, - ): + def get_posts(self, start_date=None, end_date=None, language=None, list_ids=None, + min_interations=None, search_term=None, types=None): """ Return a set of posts for the given parameters. @@ -161,25 +154,21 @@ def get_posts( See :ref:`parsons-table` for output options. 
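A short sketch of a typical call (the list id is hypothetical; assumes ``CT_API_KEY`` is set in the environment):

.. code-block:: python

    ct = CrowdTangle()
    posts = ct.get_posts(start_date='2020-11-01', end_date='2020-11-03',
                         list_ids=['12345'])
    print(f'Retrieved {posts.num_rows} posts')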
""" - args = { - "startDate": start_date, - "endDate": end_date, - "language": language, - "listIds": self._list_to_string(list_ids), - "minInteractions": min_interations, - "searchTerm": search_term, - "types": types, - } + args = {'startDate': start_date, + 'endDate': end_date, + 'language': language, + 'listIds': self._list_to_string(list_ids), + 'minInteractions': min_interations, + 'searchTerm': search_term, + 'types': types} logger.info("Retrieving posts.") - pt = Table(self._base_request("posts", args=args)) - logger.info(f"Retrieved {pt.num_rows} posts.") + pt = Table(self._base_request('posts', args=args)) + logger.info(f'Retrieved {pt.num_rows} posts.') self._unpack(pt) return pt - def get_leaderboard( - self, start_date=None, end_date=None, list_ids=None, account_ids=None - ): + def get_leaderboard(self, start_date=None, end_date=None, list_ids=None, account_ids=None): """ Return leaderboard data. @@ -207,21 +196,18 @@ def get_leaderboard( See :ref:`parsons-table` for output options. """ - args = { - "startDate": start_date, - "endDate": end_date, - "listIds": self._list_to_string(list_ids), - "accountIds": self._list_to_string(account_ids), - } + args = {'startDate': start_date, + 'endDate': end_date, + 'listIds': self._list_to_string(list_ids), + 'accountIds': self._list_to_string(account_ids)} - pt = Table(self._base_request("leaderboard", args=args)) - logger.info(f"Retrieved {pt.num_rows} records from the leaderbooard.") + pt = Table(self._base_request('leaderboard', args=args)) + logger.info(f'Retrieved {pt.num_rows} records from the leaderbooard.') self._unpack(pt) return pt - def get_links( - self, link, start_date=None, end_date=None, include_summary=None, platforms=None - ): + def get_links(self, link, start_date=None, end_date=None, include_summary=None, + platforms=None): """ Return up to 100 posts based on a specific link. It is strongly recommended to use the ``start_date`` parameter to limit queries to relevant dates. @@ -254,16 +240,14 @@ def get_links( See :ref:`parsons-table` for output options. """ - args = { - "link": link, - "startDate": start_date, - "endDate": end_date, - "includeSummary": str(include_summary), - "platforms": self._list_to_string(platforms), - } + args = {'link': link, + 'startDate': start_date, + 'endDate': end_date, + 'includeSummary': str(include_summary), + 'platforms': self._list_to_string(platforms)} logger.info("Retrieving posts based on link.") - pt = Table(self._base_request("links", args=args)) - logger.info(f"Retrieved {pt.num_rows} links.") + pt = Table(self._base_request('links', args=args)) + logger.info(f'Retrieved {pt.num_rows} links.') self._unpack(pt) return pt diff --git a/parsons/databases/alchemy.py b/parsons/databases/alchemy.py index 43729c3a52..80fe9234df 100644 --- a/parsons/databases/alchemy.py +++ b/parsons/databases/alchemy.py @@ -5,6 +5,7 @@ class Alchemy: + def generate_engine(self): """ Generate a SQL Alchemy engine. 
@@ -19,20 +20,18 @@ def generate_alchemy_url(self): https://docs.sqlalchemy.org/en/14/core/engines.html# """ - if self.dialect == "redshift" or self.dialect == "postgres": - connection_schema = "postgresql+psycopg2" - elif self.dialect == "mysql": - connection_schema = "mysql+mysqlconnector" + if self.dialect == 'redshift' or self.dialect == 'postgres': + connection_schema = 'postgresql+psycopg2' + elif self.dialect == 'mysql': + connection_schema = 'mysql+mysqlconnector' - params = [ - (self.username, self.username), - (self.password, f":{self.password}"), - (self.host, f"@{self.host}"), - (self.port, f":{self.port}"), - (self.db, f"/{self.db}"), - ] + params = [(self.username, self.username), + (self.password, f':{self.password}'), + (self.host, f'@{self.host}'), + (self.port, f':{self.port}'), + (self.db, f'/{self.db}')] - url = f"{connection_schema}://" + url = f'{connection_schema}://' for i in params: if i[0]: diff --git a/parsons/databases/database/__init__.py b/parsons/databases/database/__init__.py index 0b9e745aab..f3a0cc8df0 100644 --- a/parsons/databases/database/__init__.py +++ b/parsons/databases/database/__init__.py @@ -1,3 +1,5 @@ from parsons.databases.database.database import DatabaseCreateStatement -__all__ = ["DatabaseCreateStatement"] +__all__ = [ + 'DatabaseCreateStatement' +] diff --git a/parsons/databases/database/constants.py b/parsons/databases/database/constants.py index 8935a8734d..ec707ba455 100644 --- a/parsons/databases/database/constants.py +++ b/parsons/databases/database/constants.py @@ -1,153 +1,26 @@ # These are reserved words by Redshift and cannot be used as column names. RESERVED_WORDS = [ - "AES128", - "AES256", - "ALL", - "ALLOWOVERWRITE", - "ANALYSE", - "ANALYZE", - "AND", - "ANY", - "ARRAY", - "AS", - "ASC", - "AUTHORIZATION", - "BACKUP", - "BETWEEN", - "BINARY", - "BLANKSASNULL", - "BOTH", - "BYTEDICT", - "BZIP2", - "CASE", - "CAST", - "CHECK", - "COLLATE", - "COLUMN", - "CONSTRAINT", - "CREATE", - "CREDENTIALS", - "CROSS", - "CURRENT_DATE", - "CURRENT_TIME", - "CURRENT_TIMESTAMP", - "CURRENT_USER", - "CURRENT_USER_ID", - "DEFAULT", - "DEFERRABLE", - "DEFLATE", - "DEFRAG", - "DELTA", - "DELTA32K", - "DESC", - "DISABLE", - "DISTINCT", - "DO", - "ELSE", - "EMPTYASNULL", - "ENABLE", - "ENCODE", - "ENCRYPT", - "ENCRYPTION", - "END", - "EXCEPT", - "EXPLICIT", - "FALSE", - "FOR", - "FOREIGN", - "FREEZE", - "FROM", - "FULL", - "GLOBALDICT256", - "GLOBALDICT64K", - "GRANT", - "GROUP", - "GZIP", - "HAVING", - "IDENTITY", - "IGNORE", - "ILIKE", - "IN", - "INITIALLY", - "INNER", - "INTERSECT", - "INTO", - "IS", - "ISNULL", - "JOIN", - "LEADING", - "LEFT", - "LIKE", - "LIMIT", - "LOCALTIME", - "LOCALTIMESTAMP", - "LUN", - "LUNS", - "LZO", - "LZOP", - "MINUS", - "MOSTLY13", - "MOSTLY32", - "MOSTLY8", - "NATURAL", - "NEW", - "NOT", - "NOTNULL", - "NULL", - "NULLS", - "OFF", - "OFFLINE", - "OFFSET", - "OLD", - "ON", - "ONLY", - "OPEN", - "OR", - "ORDER", - "OUTER", - "OVERLAPS", - "PARALLEL", - "PARTITION", - "PERCENT", - "PERMISSIONS", - "PLACING", - "PRIMARY", - "RAW", - "READRATIO", - "RECOVER", - "REFERENCES", - "RESPECT", - "REJECTLOG", - "RESORT", - "RESTORE", - "RIGHT", - "SELECT", - "SESSION_USER", - "SIMILAR", - "SOME", - "SYSDATE", - "SYSTEM", - "TABLE", - "TAG", - "TDES", - "TEXT255", - "TEXT32K", - "THEN", - "TIMESTAMP", - "TO", - "TOP", - "TRAILING", - "TRUE", - "TRUNCATECOLUMNS", - "UNION", - "UNIQUE", - "USER", - "USING", - "VERBOSE", - "WALLET", - "WHEN", - "WHERE", - "WITH", + "AES128", "AES256", "ALL", "ALLOWOVERWRITE", "ANALYSE", "ANALYZE", 
"AND", + "ANY", "ARRAY", "AS", "ASC", "AUTHORIZATION", "BACKUP", "BETWEEN", "BINARY", + "BLANKSASNULL", "BOTH", "BYTEDICT", "BZIP2", "CASE", "CAST", "CHECK", + "COLLATE", "COLUMN", "CONSTRAINT", "CREATE", "CREDENTIALS", "CROSS", + "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", + "CURRENT_USER_ID", "DEFAULT", "DEFERRABLE", "DEFLATE", "DEFRAG", "DELTA", + "DELTA32K", "DESC", "DISABLE", "DISTINCT", "DO", "ELSE", "EMPTYASNULL", + "ENABLE", "ENCODE", "ENCRYPT", "ENCRYPTION", "END", "EXCEPT", "EXPLICIT", + "FALSE", "FOR", "FOREIGN", "FREEZE", "FROM", "FULL", "GLOBALDICT256", + "GLOBALDICT64K", "GRANT", "GROUP", "GZIP", "HAVING", "IDENTITY", "IGNORE", + "ILIKE", "IN", "INITIALLY", "INNER", "INTERSECT", "INTO", "IS", "ISNULL", + "JOIN", "LEADING", "LEFT", "LIKE", "LIMIT", "LOCALTIME", "LOCALTIMESTAMP", + "LUN", "LUNS", "LZO", "LZOP", "MINUS", "MOSTLY13", "MOSTLY32", "MOSTLY8", + "NATURAL", "NEW", "NOT", "NOTNULL", "NULL", "NULLS", "OFF", "OFFLINE", + "OFFSET", "OLD", "ON", "ONLY", "OPEN", "OR", "ORDER", "OUTER", "OVERLAPS", + "PARALLEL", "PARTITION", "PERCENT", "PERMISSIONS", "PLACING", "PRIMARY", + "RAW", "READRATIO", "RECOVER", "REFERENCES", "RESPECT", "REJECTLOG", + "RESORT", "RESTORE", "RIGHT", "SELECT", "SESSION_USER", "SIMILAR", "SOME", + "SYSDATE", "SYSTEM", "TABLE", "TAG", "TDES", "TEXT255", "TEXT32K", "THEN", + "TIMESTAMP", "TO", "TOP", "TRAILING", "TRUE", "TRUNCATECOLUMNS", "UNION", + "UNIQUE", "USER", "USING", "VERBOSE", "WALLET", "WHEN", "WHERE", "WITH", "WITHOUT", ] diff --git a/parsons/databases/database/database.py b/parsons/databases/database/database.py index 9f369d43c9..1b71726266 100644 --- a/parsons/databases/database/database.py +++ b/parsons/databases/database/database.py @@ -2,7 +2,8 @@ import ast -class DatabaseCreateStatement: +class DatabaseCreateStatement(): + def __init__(self): self.INT_TYPES = consts.INT_TYPES self.SMALLINT = consts.SMALLINT @@ -103,11 +104,8 @@ def is_valid_sql_num(self, val): # then it's a valid sql number # Also check the first character is not zero try: - if ( - (float(val) or 1) - and "_" not in val - and (val in ("0", "0.0") or val[0] != "0") - ): + if ((float(val) or 1) and "_" not in val and + (val in ("0", "0.0") or val[0] != "0")): return True else: return False @@ -134,9 +132,8 @@ def is_sql_bool(self, val): return if isinstance(val, bool) or ( - type(val) in (int, str) - and str(val).upper() in self.TRUE_VALS + self.FALSE_VALS - ): + type(val) in (int, str) and + str(val).upper() in self.TRUE_VALS + self.FALSE_VALS): return True return False @@ -208,11 +205,11 @@ def detect_data_type(self, value, cmp_type=None): if type_lit == int and cmp_type in (self.INT_TYPES + [None, "", self.BOOL]): # Use smallest possible int type above TINYINT - if self.SMALLINT_MIN < val_lit < self.SMALLINT_MAX: + if (self.SMALLINT_MIN < val_lit < self.SMALLINT_MAX): return self.get_bigger_int(self.SMALLINT, cmp_type) - elif self.MEDIUMINT_MIN < val_lit < self.MEDIUMINT_MAX: + elif (self.MEDIUMINT_MIN < val_lit < self.MEDIUMINT_MAX): return self.get_bigger_int(self.MEDIUMINT, cmp_type) - elif self.INT_MIN < val_lit < self.INT_MAX: + elif (self.INT_MIN < val_lit < self.INT_MAX): return self.get_bigger_int(self.INT, cmp_type) else: return self.BIGINT @@ -264,7 +261,7 @@ def format_column(self, col, index="", replace_chars=None, col_prefix="_"): col = f"x_{col}" if len(col) > self.COL_NAME_MAX_LEN: - col = col[: self.COL_NAME_MAX_LEN] + col = col[:self.COL_NAME_MAX_LEN] return col diff --git a/parsons/databases/database_connector.py 
b/parsons/databases/database_connector.py deleted file mode 100644 index e6778846be..0000000000 --- a/parsons/databases/database_connector.py +++ /dev/null @@ -1,190 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Optional -from parsons.etl.table import Table - - -class DatabaseConnector(ABC): - """ - An abstract base class that provides a uniform interface for all Parsons database connectors. - This class should be used in functions instead of the specific database connector classes - when the functions don't rely on database-specific functionality. - - It ensures that any class that inherits from it implements the methods that are uniform - operations when working with databases. - - Should you use `DatabaseConnector` instead of `Redshift`/`BigQuery`/etc? - - Overall this class is mostly useful for code in the Parsons library, not code using it. - There could be some exceptions. In general though, if you are writing a script to do a task - like moving data out of an API service and into a data warehouse, you probably do not need - to use DatabaseConnector. You can probably just use the Parsons class that directly corresponds - with the database that you use. - - Here are more examples of situations where you may or may not need to use DatabaseConnector: - - 1. You do not use type annotations, or you don't know what "type annotations" are - No - - If you do not use type annotations for your code, then you do not need to think about - `DatabaseConnector` when writing your code. This is the most common case. If none - of the cases below apply to you, then you probably don't need it. - - In this simple example, we are not using type annotations in our code. We don't need - to think about exactly what class is being passed in. Python will figure it out. - - ```python - def my_database_function(db): - some_data = get_some_data() - db.copy("some_table", some_data) - - # These will all just work: - my_database_function(Redshift()) - my_database_function(MySQL()) - my_database_function(BigQuery()) - ``` - - 2. You only use one database in your work - No - - This is where most people will fall. Usually code is not intended to run on - multiple databases without modification. For example, if you are working for - an organization that uses Amazon Redshift as your data warehouse, you do not - need to use `DatabaseConnector` to write ETL scripts to load data into your - Redshift. It is rare that organizations switch databases. In the cases where - that does occur, usually more work is required to migrate your environment and - your vendor-specific SQL than would be saved by using `DatabaseConnector`. - - 3. You are writing a sample script or a tutorial - Yes - - If you are using Parsons to write a sample script or tutorial, you should use - `DatabaseConnector`! If you use `DatabaseConnector` type annotations and the - `discover_database` function, then your sample code will run on any system. - This makes it much easier for new programmers to get your code working on - their system. - - 4. Utility code inside Parsons or other libraries - Yes - - If you are writing a utility script inside Parsons or another library meant - for broad distribution, you should probably use `DatabaseConnector` type - annotations. This will ensure that your library code will be usable by the - widest possible set of users, not just users on one specific database. - - Developer Notes: - This class is an Abstract Base Class (ABC).
It's designed to ensure that all classes - inheriting from it implement certain methods, enforcing a consistent interface across - database connectors. - - If you need to add a new method to the database connectors, there are three options: - 1. Add the method to this ABC and implement it for all databases. - 2. Add the method to this ABC and implement it for some databases while adding stubs for - others. - 3. Implement the method on a specific database connector without touching the ABC. - - If you go the second route, you can add a stub method like this: - - .. code-block:: python - - def new_method(self, arg1, arg2): - raise NotImplementedError("Method not implemented for this database connector.") - - This communicates clearly to users that the method does not exist for certain connectors. - - If you go the third route, remember that you're responsible for making sure your new - method matches the existing methods in other database connectors. For example, if you're - adding a method that already exists in another connector, like Redshift, you need to ensure - your new method behaves the same way and has the same parameters with the same types in the - same order. See the note below for more detail. - - Note: - The Python type system (as of 3.10.6) will not stop you from breaking the type contract - of method signatures when implementing a subclass. It is up to the author of a database - connector to ensure that it satisfies this interface. Be careful to, for example, not - change the types of the parameters or leave out optional parameters that are specified - in the interface. - - Any such inconsistencies can cause unexpected runtime errors that will not be caught by - the type checker. - - It is safe to add additional features to subclasses, such as new methods or extra *optional* - parameters to specified methods. In general adding new methods is safe, but adding optional - parameters to methods specified in the interface should be considered bad practice, because - it could result in unexpected behavior. - - Example usage: - - .. code-block:: python - - def my_function(db: DatabaseConnector, data: Table): - # Your code here, using the db object - - # Pass an instance of a class that inherits from DatabaseConnector, e.g. Redshift - my_function(some_db_instance, some_data) - - """ - - @abstractmethod - def table_exists(self, table_name: str) -> bool: - """Check if a table or view exists in the database. - - `Args:` - table_name: str - The table name and schema (e.g. ``myschema.mytable``). - - `Returns:` - boolean - ``True`` if the table exists and ``False`` if it does not. - """ - pass - - @abstractmethod - def copy(self, tbl: Table, table_name: str, if_exists: str): - """Copy a :ref:`parsons-table` to the database. - - `Args`: - tbl (Table): - Table containing the data to save. - table_name (str): - The destination table name (ex. ``my_schema.my_table``). - if_exists (str): - If the table already exists, either ``fail``, ``append``, ``drop`` - or ``truncate`` the table. - """ - pass - - @abstractmethod - def query(self, sql: str, parameters: Optional[list] = None) -> Optional[Table]: - """Execute a query against the database. Will return ``None`` if the query returns empty. - - To include python variables in your query, it is recommended to pass them as parameters, - following the `psycopg style - `. - Using the ``parameters`` argument ensures that values are escaped properly, and avoids SQL - injection attacks. - - **Parameter Examples** - - .. 
code-block:: python - - # Note that the name contains a quote, which could break your query if not escaped - # properly. - name = "Beatrice O'Brady" - sql = "SELECT * FROM my_table WHERE name = %s" - db.query(sql, parameters=[name]) - - .. code-block:: python - - names = ["Allen Smith", "Beatrice O'Brady", "Cathy Thompson"] - placeholders = ', '.join('%s' for item in names) - sql = f"SELECT * FROM my_table WHERE name IN ({placeholders})" - db.query(sql, parameters=names) - - `Args:` - sql: str - A valid SQL statement - parameters: Optional[list] - A list of python variables to be converted into SQL values in your query - - `Returns:` - Parsons Table - See :ref:`parsons-table` for output options. - """ - pass diff --git a/parsons/databases/db_sync.py b/parsons/databases/db_sync.py index aeb93f40a3..f250e33bff 100644 --- a/parsons/databases/db_sync.py +++ b/parsons/databases/db_sync.py @@ -28,14 +28,8 @@ class DBSync: A DBSync object. """ - def __init__( - self, - source_db, - destination_db, - read_chunk_size=100_000, - write_chunk_size=None, - retries=0, - ): + def __init__(self, source_db, destination_db, read_chunk_size=100_000, write_chunk_size=None, + retries=0): self.source_db = source_db self.dest_db = destination_db @@ -43,15 +37,8 @@ def __init__( self.write_chunk_size = write_chunk_size or read_chunk_size self.retries = retries - def table_sync_full( - self, - source_table, - destination_table, - if_exists="drop", - order_by=None, - verify_row_count=True, - **kwargs, - ): + def table_sync_full(self, source_table, destination_table, if_exists='drop', + order_by=None, verify_row_count=True, **kwargs): """ Full sync of table from a source database to a destination database. This will wipe all data from the destination table. @@ -82,18 +69,16 @@ def table_sync_full( source_tbl = self.source_db.table(source_table) destination_tbl = self.dest_db.table(destination_table) - logger.info( - f"Syncing full table data from {source_table} to {destination_table}" - ) + logger.info(f'Syncing full table data from {source_table} to {destination_table}') # Drop or truncate if the destination table exists if destination_tbl.exists: - if if_exists == "drop": + if if_exists == 'drop': destination_tbl.drop() - elif if_exists == "truncate": + elif if_exists == 'truncate': self._check_column_match(source_tbl, destination_tbl) destination_tbl.truncate() - elif if_exists == "drop_if_needed": + elif if_exists == 'drop_if_needed': try: self._check_column_match(source_tbl, destination_tbl) destination_tbl.truncate() @@ -101,32 +86,22 @@ def table_sync_full( logger.info(f"needed to drop {destination_tbl}...") destination_tbl.drop() else: - raise ValueError( - "Invalid if_exists argument. Must be drop or truncate." - ) + raise ValueError('Invalid if_exists argument. Must be drop or truncate.') # Create the table, if needed. 
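For reference, a minimal sketch of driving the `table_sync_full` flow above. It assumes the source and destination connectors pick up credentials from the usual environment variables (`MYSQL_*` and `REDSHIFT_*`); the table names are hypothetical.

```python
from parsons import MySQL, Redshift
from parsons.databases.db_sync import DBSync

# Credentials come from MYSQL_* and REDSHIFT_* environment variables.
sync = DBSync(MySQL(), Redshift(), read_chunk_size=50_000, retries=2)

# "truncate" keeps the destination table definition but clears its rows
# before copying everything over from the source.
sync.table_sync_full("app.events", "warehouse.events", if_exists="truncate")
```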
if not destination_tbl.exists: self.create_table(source_table, destination_table) - copied_rows = self.copy_rows( - source_table, destination_table, None, order_by, **kwargs - ) + copied_rows = self.copy_rows(source_table, destination_table, None, + order_by, **kwargs) if verify_row_count: self._row_count_verify(source_tbl, destination_tbl) - logger.info(f"{source_table} synced: {copied_rows} total rows copied.") - - def table_sync_incremental( - self, - source_table, - destination_table, - primary_key, - distinct_check=True, - verify_row_count=True, - **kwargs, - ): + logger.info(f'{source_table} synced: {copied_rows} total rows copied.') + + def table_sync_incremental(self, source_table, destination_table, primary_key, + distinct_check=True, verify_row_count=True, **kwargs): """ Incremental sync of table from a source database to a destination database using an incremental primary key. @@ -158,65 +133,47 @@ # Check that the destination table exists. If it does not, then run a # full sync instead. if not destination_tbl.exists: - logger.info( - "Destination table %s does not exist, running a full sync", - destination_table, - ) - self.table_sync_full( - source_table, destination_table, order_by=primary_key, **kwargs - ) + logger.info('Destination table %s does not exist, running a full sync', + destination_table) + self.table_sync_full(source_table, destination_table, order_by=primary_key, **kwargs) return # Check that the source table primary key is distinct if distinct_check and not source_tbl.distinct_primary_key(primary_key): - logger.info( - "Checking for distinct values for column %s in table %s", - primary_key, - source_table, - ) - raise ValueError(f"{primary_key} is not distinct in source table.") + logger.info('Checking for distinct values for column %s in table %s', + primary_key, source_table) + raise ValueError(f'{primary_key} is not distinct in source table.') # Get the max source table and destination table primary key - logger.debug( - "Calculating the maximum value for %s for source table %s", - primary_key, - source_table, - ) + logger.debug('Calculating the maximum value for %s for source table %s', primary_key, + source_table) source_max_pk = source_tbl.max_primary_key(primary_key) - logger.debug( - "Calculating the maximum value for %s for destination table %s", - primary_key, - destination_table, - ) + logger.debug('Calculating the maximum value for %s for destination table %s', primary_key, + destination_table) dest_max_pk = destination_tbl.max_primary_key(primary_key) # Check for a mismatch in row counts; if dest_max_pk is None, the destination is empty # and we don't have to worry about this check. if dest_max_pk is not None and dest_max_pk > source_max_pk: - raise ValueError( - "Destination DB primary key greater than source DB primary key." - ) + raise ValueError('Destination DB primary key greater than source DB primary key.') # Do not copy if row counts are equal.
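The incremental variant above copies only rows whose primary key exceeds the destination's current maximum. A hedged sketch, with the same hypothetical connectors and table names:

```python
from parsons import MySQL, Redshift
from parsons.databases.db_sync import DBSync

sync = DBSync(MySQL(), Redshift())

# distinct_check confirms the key is unique in the source before copying;
# verify_row_count compares source and destination row counts afterwards.
sync.table_sync_incremental(
    "app.events",
    "warehouse.events",
    primary_key="event_id",
    distinct_check=True,
    verify_row_count=True,
)
```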
elif dest_max_pk == source_max_pk: - logger.info("Tables are already in sync.") + logger.info('Tables are already in sync.') return None else: - rows_copied = self.copy_rows( - source_table, destination_table, dest_max_pk, primary_key, **kwargs - ) + rows_copied = self.copy_rows(source_table, destination_table, dest_max_pk, + primary_key, **kwargs) - logger.info("Copied %s new rows to %s.", rows_copied, destination_table) + logger.info('Copied %s new rows to %s.', rows_copied, destination_table) if verify_row_count: self._row_count_verify(source_tbl, destination_tbl) - logger.info(f"{source_table} synced to {destination_table}.") + logger.info(f'{source_table} synced to {destination_table}.') - def copy_rows( - self, source_table_name, destination_table_name, cutoff, order_by, **kwargs - ): + def copy_rows(self, source_table_name, destination_table_name, cutoff, order_by, **kwargs): """ Copy the rows from the source to the destination. @@ -255,19 +212,15 @@ def copy_rows( if cutoff: # If we have a cutoff, we are loading data incrementally -- filter out # any data before our cutoff - rows = source_table.get_new_rows( - primary_key=order_by, - cutoff_value=cutoff, - offset=total_rows_downloaded, - chunk_size=self.read_chunk_size, - ) + rows = source_table.get_new_rows(primary_key=order_by, + cutoff_value=cutoff, + offset=total_rows_downloaded, + chunk_size=self.read_chunk_size) else: # Get a chunk - rows = source_table.get_rows( - offset=total_rows_downloaded, - chunk_size=self.read_chunk_size, - order_by=order_by, - ) + rows = source_table.get_rows(offset=total_rows_downloaded, + chunk_size=self.read_chunk_size, + order_by=order_by) number_of_rows = rows.num_rows total_rows_downloaded += number_of_rows @@ -276,14 +229,9 @@ def copy_rows( if number_of_rows == 0: # If we have any rows that are unwritten, flush them to the destination database if rows_buffered > 0: - logger.debug( - "Copying %s rows to %s", - rows_buffered, - destination_table_name, - ) - self.dest_db.copy( - buffer, destination_table_name, if_exists="append", **kwargs - ) + logger.debug('Copying %s rows to %s', rows_buffered, destination_table_name) + self.dest_db.copy(buffer, destination_table_name, if_exists='append', + **kwargs) total_rows_written += rows_buffered # Reset the buffer @@ -298,12 +246,8 @@ def copy_rows( # If our buffer reaches our write threshold, write it out if rows_buffered >= self.write_chunk_size: - logger.debug( - "Copying %s rows to %s", rows_buffered, destination_table_name - ) - self.dest_db.copy( - buffer, destination_table_name, if_exists="append", **kwargs - ) + logger.debug('Copying %s rows to %s', rows_buffered, destination_table_name) + self.dest_db.copy(buffer, destination_table_name, if_exists='append', **kwargs) total_rows_written += rows_buffered # Reset the buffer @@ -316,11 +260,11 @@ def copy_rows( # If we are out of retries, fail if retries_left == 0: - logger.debug("No retries remaining") + logger.debug('No retries remaining') raise # Otherwise, log the exception and try again - logger.exception("Unhandled error copying data; retrying") + logger.exception('Unhandled error copying data; retrying') return total_rows_written @@ -331,10 +275,8 @@ def _check_column_match(source_table_obj, destination_table_obj): """ if source_table_obj.columns != destination_table_obj.columns: - raise ValueError( - """Destination table columns do not match source table columns. 
- Consider dropping destination table and running a full sync.""" - ) + raise ValueError("""Destination table columns do not match source table columns. + Consider dropping destination table and running a full sync.""") @staticmethod def _row_count_verify(source_table_obj, destination_table_obj): @@ -346,15 +288,11 @@ def _row_count_verify(source_table_obj, destination_table_obj): dest_row_count = destination_table_obj.num_rows if source_row_count != dest_row_count: - logger.warning( - ( - f"Table count mismatch. Source table contains {source_row_count}.", - f" Destination table contains {dest_row_count}.", - ) - ) + logger.warning((f'Table count mismatch. Source table contains {source_row_count}.', + f' Destination table contains {dest_row_count}.')) return False - logger.info("Source and destination table row counts match.") + logger.info('Source and destination table row counts match.') return True def create_table(self, source_table, destination_table): @@ -369,7 +307,5 @@ source_obj = self.source_db.get_table_object(source_table) self.dest_db.create_table(source_obj, destination_table) except Exception: - logger.warning( - "Unable to create destination table based on source table; we will " - 'fallback to using "copy" to create the destination.' - ) + logger.warning('Unable to create destination table based on source table; we will ' + 'fallback to using "copy" to create the destination.') diff --git a/parsons/databases/discover_database.py b/parsons/databases/discover_database.py deleted file mode 100644 index 1d51a37112..0000000000 --- a/parsons/databases/discover_database.py +++ /dev/null @@ -1,79 +0,0 @@ -import os -from typing import Optional, Union, Type, List - -from parsons.databases.database_connector import DatabaseConnector -from parsons.databases.redshift import Redshift -from parsons.databases.mysql import MySQL -from parsons.databases.postgres import Postgres -from parsons.google.google_bigquery import GoogleBigQuery - - -def discover_database( - default_connector: Optional[ - Union[Type[DatabaseConnector], List[Type[DatabaseConnector]]] - ] = None -) -> DatabaseConnector: - """Create an appropriate ``DatabaseConnector`` based on environmental variables. - - Will search the environmental variables for the proper credentials for the - Redshift, MySQL, Postgres, and BigQuery connectors. See the documentation - for the connectors for the variables required to initialize them. - - If no suitable configuration is found, will raise an error. - - If multiple suitable configurations are found, will raise an error unless - a default connector class or list of classes is provided. - - Note that the variables to be searched for are hard-coded in this function, - since they are unlikely to change. If they change for some reason, or a - new database connector is added, ``discover_database`` should be updated. - - Args: - default_connector: Optional, single Class or list of Classes inheriting from - DatabaseConnector to be used as default in case multiple database configurations - are detected. - - Returns: - DatabaseConnector: The database connector configured in the environment.
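To make the detection logic below concrete, a short sketch of calling `discover_database` (the module this diff removes). It assumes credentials for at least one connector, e.g. `REDSHIFT_PASSWORD`, are present in the environment; `default_connector` resolves the ambiguity when several are.

```python
from parsons import Redshift, Postgres
from parsons.databases.discover_database import discover_database

# Exactly one database configured in the environment: use it.
db = discover_database()

# Several configured: fall back to the first detected default.
db = discover_database(default_connector=[Redshift, Postgres])

tbl = db.query("SELECT 1 AS one")
```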
- """ - connectors = { - "Redshift": Redshift, - "MySQL": MySQL, - "Postgres": Postgres, - "GoogleBigQuery": GoogleBigQuery, - } - - password_vars = { - "Redshift": "REDSHIFT_PASSWORD", - "MySQL": "MYSQL_PASSWORD", - "Postgres": "PGPASSWORD", - "GoogleBigQuery": "GOOGLE_APPLICATION_CREDENTIALS", - } - - detected = [name for name in connectors.keys() if os.getenv(password_vars[name])] - - if len(detected) > 1: - if default_connector is None: - raise EnvironmentError( - f"Multiple database configurations detected: {detected}." - " Please specify a default connector." - ) - - if isinstance(default_connector, list): - for connector in default_connector: - if connector.__name__ in detected: - return connector() - raise EnvironmentError( - f"None of the default connectors {default_connector} were detected." - ) - elif default_connector.__name__ in detected: - return default_connector() - else: - raise EnvironmentError( - f"Default connector {default_connector} not detected. Detected: {detected}." - ) - - elif detected: - return connectors[detected[0]]() - else: - raise EnvironmentError("Could not find any database configuration.") diff --git a/parsons/databases/mysql/__init__.py b/parsons/databases/mysql/__init__.py index 6284e7dca4..b090a41f79 100644 --- a/parsons/databases/mysql/__init__.py +++ b/parsons/databases/mysql/__init__.py @@ -1,3 +1,5 @@ from parsons.databases.mysql.mysql import MySQL -__all__ = ["MySQL"] +__all__ = [ + 'MySQL' +] diff --git a/parsons/databases/mysql/constants.py b/parsons/databases/mysql/constants.py index 2a39d90280..81784c2362 100644 --- a/parsons/databases/mysql/constants.py +++ b/parsons/databases/mysql/constants.py @@ -1,6 +1,6 @@ # Additional padding to add on to the maximum column width to account # for the addition of future data sets. -VARCHAR_PAD = 0.25 +VARCHAR_PAD = .25 COL_NAME_MAX_LEN = 64 diff --git a/parsons/databases/mysql/create_table.py b/parsons/databases/mysql/create_table.py index f2f958bac9..c4fe00b3a8 100644 --- a/parsons/databases/mysql/create_table.py +++ b/parsons/databases/mysql/create_table.py @@ -8,6 +8,7 @@ class MySQLCreateTable(DatabaseCreateStatement): + def __init__(self): super().__init__() @@ -41,8 +42,8 @@ def evaluate_column(self, column_rows): col_type = self.data_type(row, col_type) # Calculate width if a varchar - if col_type == "varchar": - row_width = len(str(row.encode("utf-8"))) + if col_type == 'varchar': + row_width = len(str(row.encode('utf-8'))) # Evaluate width vs. 
current max width if row_width > col_width: @@ -58,7 +59,7 @@ def evaluate_table(self, tbl): for col in tbl.columns: col_type, col_width = self.evaluate_column(tbl.column_data(col)) - col_map = {"name": col, "type": col_type, "width": col_width} + col_map = {'name': col, 'type': col_type, 'width': col_width} table_map.append(col_map) return table_map @@ -76,11 +77,11 @@ def create_statement(self, tbl, table_name, strict_length=True): column_syntax = [] for c in table_map: if strict_length: - col_width = int(c["width"] + (self.VARCHAR_PAD * c["width"])) + col_width = int(c['width'] + (self.VARCHAR_PAD * c['width'])) else: - col_width = self.round_longest(c["width"]) + col_width = self.round_longest(c['width']) - if c["type"] == "varchar": + if c['type'] == 'varchar': column_syntax.append(f"{c['name']} {c['type']}({col_width}) \n") else: column_syntax.append(f"{c['name']} {c['type']} \n") diff --git a/parsons/databases/mysql/mysql.py b/parsons/databases/mysql/mysql.py index 3572d82bce..b2511a5596 100644 --- a/parsons/databases/mysql/mysql.py +++ b/parsons/databases/mysql/mysql.py @@ -7,7 +7,6 @@ import pickle import logging import os -from parsons.databases.database_connector import DatabaseConnector from parsons.databases.table import BaseTable from parsons.databases.mysql.create_table import MySQLCreateTable from parsons.databases.alchemy import Alchemy @@ -20,7 +19,7 @@ logger = logging.getLogger(__name__) -class MySQL(DatabaseConnector, MySQLCreateTable, Alchemy): +class MySQL(MySQLCreateTable, Alchemy): """ Connect to a MySQL database. @@ -40,11 +39,11 @@ class MySQL(DatabaseConnector, MySQLCreateTable, Alchemy): def __init__(self, host=None, username=None, password=None, db=None, port=3306): super().__init__() - self.username = check_env.check("MYSQL_USERNAME", username) - self.password = check_env.check("MYSQL_PASSWORD", password) - self.host = check_env.check("MYSQL_HOST", host) - self.db = check_env.check("MYSQL_DB", db) - self.port = port or os.environ.get("MYSQL_PORT") + self.username = check_env.check('MYSQL_USERNAME', username) + self.password = check_env.check('MYSQL_PASSWORD', password) + self.host = check_env.check('MYSQL_HOST', host) + self.db = check_env.check('MYSQL_DB', db) + self.port = port or os.environ.get('MYSQL_PORT') @contextmanager def connection(self): @@ -62,13 +61,11 @@ def connection(self): """ # Create a mysql connection and cursor - connection = mysql.connect( - host=self.host, - user=self.username, - passwd=self.password, - database=self.db, - port=self.port, - ) + connection = mysql.connect(host=self.host, + user=self.username, + passwd=self.password, + database=self.db, + port=self.port) try: yield connection @@ -152,11 +149,12 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): See :ref:`parsons-table` for output options. """ with self.cursor(connection) as cursor: + # The python connector can only execute a single sql statement, so we will # break up each statement and execute them separately. 
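Given the statement-splitting behavior described in the comment above, a minimal sketch of a parameterized MySQL query. Credentials are assumed to come from the `MYSQL_*` environment variables; the table name is hypothetical.

```python
from parsons import MySQL

mysql = MySQL()  # MYSQL_USERNAME, MYSQL_PASSWORD, MYSQL_HOST, MYSQL_DB

# The driver escapes parameters, so quotes in values are handled safely.
name = "Beatrice O'Brady"
tbl = mysql.query("SELECT * FROM people WHERE name = %s", parameters=[name])
```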
- for s in sql.strip().split(";"): + for s in sql.strip().split(';'): if len(s) != 0: - logger.debug(f"SQL Query: {sql}") + logger.debug(f'SQL Query: {sql}') cursor.execute(s, parameters) if commit: @@ -164,7 +162,7 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): # If the SQL query provides no response, then return None if not cursor.description: - logger.debug("Query returned 0 rows") + logger.debug('Query returned 0 rows') return None else: @@ -173,7 +171,7 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): # all the type information for each field.) temp_file = files.create_temp_file() - with open(temp_file, "wb") as f: + with open(temp_file, 'wb') as f: # Grab the header pickle.dump(cursor.column_names, f) @@ -182,24 +180,17 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): if len(batch) == 0: break - logger.debug(f"Fetched {len(batch)} rows.") + logger.debug(f'Fetched {len(batch)} rows.') for row in batch: pickle.dump(row, f) # Load a Table from the file final_tbl = Table(petl.frompickle(temp_file)) - logger.debug(f"Query returned {final_tbl.num_rows} rows.") + logger.debug(f'Query returned {final_tbl.num_rows} rows.') return final_tbl - def copy( - self, - tbl: Table, - table_name: str, - if_exists: str = "fail", - chunk_size: int = 1000, - strict_length: bool = True, - ): + def copy(self, tbl, table_name, if_exists='fail', chunk_size=1000, strict_length=True): """ Copy a :ref:`parsons-table` to the database. @@ -226,17 +217,16 @@ def copy( """ if tbl.num_rows == 0: - logger.info("Parsons table is empty. Table will not be created.") + logger.info('Parsons table is empty. Table will not be created.') return None with self.connection() as connection: + # Create table if not exists if self._create_table_precheck(connection, table_name, if_exists): - sql = self.create_statement( - tbl, table_name, strict_length=strict_length - ) + sql = self.create_statement(tbl, table_name, strict_length=strict_length) self.query_with_connection(sql, connection, commit=False) - logger.info(f"Table {table_name} created.") + logger.info(f'Table {table_name} created.') # Chunk tables in batches of 1K rows, though this can be tuned and # optimized further. @@ -281,21 +271,22 @@ def _create_table_precheck(self, connection, table_name, if_exists): True if the table needs to be created, False otherwise. """ - if if_exists not in ["fail", "truncate", "append", "drop"]: + if if_exists not in ['fail', 'truncate', 'append', 'drop']: raise ValueError("Invalid value for `if_exists` argument") # If the table exists, evaluate the if_exists argument for next steps. if self.table_exists(table_name): - if if_exists == "fail": - raise ValueError("Table already exists.") - if if_exists == "truncate": + if if_exists == 'fail': + raise ValueError('Table already exists.') + + if if_exists == 'truncate': sql = f"TRUNCATE TABLE {table_name}" self.query_with_connection(sql, connection, commit=False) logger.info(f"{table_name} truncated.") return False - if if_exists == "drop": + if if_exists == 'drop': sql = f"DROP TABLE {table_name}" self.query_with_connection(sql, connection, commit=False) logger.info(f"{table_name} dropped.") @@ -304,13 +295,15 @@ def _create_table_precheck(self, connection, table_name, if_exists): else: return True - def table_exists(self, table_name: str) -> bool: + def table_exists(self, table_name): """ Check if a table or view exists in the database. 
`Args:` table_name: str The table name + view: boolean + Check to see if a view exists by the same name `Returns:` boolean diff --git a/parsons/databases/postgres/__init__.py b/parsons/databases/postgres/__init__.py index cfabbbbeef..5b449e2b0b 100644 --- a/parsons/databases/postgres/__init__.py +++ b/parsons/databases/postgres/__init__.py @@ -1,3 +1,5 @@ from parsons.databases.postgres.postgres import Postgres -__all__ = ["Postgres"] +__all__ = [ + 'Postgres' +] diff --git a/parsons/databases/postgres/constants.py b/parsons/databases/postgres/constants.py index 690bf085f2..977e9cff59 100644 --- a/parsons/databases/postgres/constants.py +++ b/parsons/databases/postgres/constants.py @@ -1,6 +1,6 @@ COL_NAME_MAX_LEN = 120 -DECIMAL = "decimal" +DECIMAL = 'decimal' REPLACE_CHARS = {" ": ""} diff --git a/parsons/databases/postgres/postgres.py b/parsons/databases/postgres/postgres.py index 1463ec85cd..ef1fff580f 100644 --- a/parsons/databases/postgres/postgres.py +++ b/parsons/databases/postgres/postgres.py @@ -1,8 +1,6 @@ from parsons.databases.postgres.postgres_core import PostgresCore from parsons.databases.table import BaseTable from parsons.databases.alchemy import Alchemy -from parsons.databases.database_connector import DatabaseConnector -from parsons.etl.table import Table import logging import os @@ -10,7 +8,7 @@ logger = logging.getLogger(__name__) -class Postgres(PostgresCore, Alchemy, DatabaseConnector): +class Postgres(PostgresCore, Alchemy): """ A Postgres class to connect to database. Credentials can be passed from a ``.pgpass`` file stored in your home directory or with environmental variables. @@ -30,37 +28,27 @@ class Postgres(PostgresCore, Alchemy, DatabaseConnector): Seconds to timeout if connection not established. """ - def __init__( - self, username=None, password=None, host=None, db=None, port=5432, timeout=10 - ): + def __init__(self, username=None, password=None, host=None, db=None, port=5432, timeout=10): super().__init__() - self.username = username or os.environ.get("PGUSER") - self.password = password or os.environ.get("PGPASSWORD") - self.host = host or os.environ.get("PGHOST") - self.db = db or os.environ.get("PGDATABASE") - self.port = port or os.environ.get("PGPORT") + self.username = username or os.environ.get('PGUSER') + self.password = password or os.environ.get('PGPASSWORD') + self.host = host or os.environ.get('PGHOST') + self.db = db or os.environ.get('PGDATABASE') + self.port = port or os.environ.get('PGPORT') # Check if there is a pgpass file. Psycopg2 will search for this file first when # creating a connection. - pgpass = os.path.isfile(os.path.expanduser("~/.pgpass")) + pgpass = os.path.isfile(os.path.expanduser('~/.pgpass')) if not any([self.username, self.password, self.host, self.db]) and not pgpass: - raise ValueError( - "Connection arguments missing. Please pass as a pgpass file, kwargs", - "or env variables.", - ) + raise ValueError('Connection arguments missing. Please pass as a pgpass file, kwargs', + 'or env variables.') self.timeout = timeout - self.dialect = "postgres" - - def copy( - self, - tbl: Table, - table_name: str, - if_exists: str = "fail", - strict_length: bool = False, - ): + self.dialect = 'postgres' + + def copy(self, tbl, table_name, if_exists='fail', strict_length=False): """ Copy a :ref:`parsons-table` to Postgres. 
@@ -76,26 +64,26 @@ def copy( If the database table needs to be created, strict_length determines whether the created table's column sizes will be sized to exactly fit the current data, or if their size will be rounded up to account for future values being larger - than the current dataset. Defaults to ``False``. + than the current dataset """ with self.connection() as connection: + # Auto-generate table if self._create_table_precheck(connection, table_name, if_exists): + # Create the table # To Do: Pass in the advanced configuration parameters. - sql = self.create_statement( - tbl, table_name, strict_length=strict_length - ) + sql = self.create_statement(tbl, table_name, strict_length=strict_length) self.query_with_connection(sql, connection, commit=False) - logger.info(f"{table_name} created.") + logger.info(f'{table_name} created.') sql = f"COPY {table_name} FROM STDIN CSV HEADER;" with self.cursor(connection) as cursor: cursor.copy_expert(sql, open(tbl.to_csv(), "r")) - logger.info(f"{tbl.num_rows} rows copied to {table_name}.") + logger.info(f'{tbl.num_rows} rows copied to {table_name}.') def table(self, table_name): # Return a Postgres table object diff --git a/parsons/databases/postgres/postgres_core.py b/parsons/databases/postgres/postgres_core.py index 8b6557de9c..7d150ee8e5 100644 --- a/parsons/databases/postgres/postgres_core.py +++ b/parsons/databases/postgres/postgres_core.py @@ -1,5 +1,4 @@ from contextlib import contextmanager -from typing import Optional import psycopg2 import psycopg2.extras from parsons.etl.table import Table @@ -18,6 +17,7 @@ class PostgresCore(PostgresCreateStatement): + @contextmanager def connection(self): """ @@ -34,14 +34,9 @@ def connection(self): """ # Create a psycopg2 connection and cursor - conn = psycopg2.connect( - user=self.username, - password=self.password, - host=self.host, - dbname=self.db, - port=self.port, - connect_timeout=self.timeout, - ) + conn = psycopg2.connect(user=self.username, password=self.password, + host=self.host, dbname=self.db, port=self.port, + connect_timeout=self.timeout) try: yield conn @@ -62,7 +57,7 @@ def cursor(self, connection): finally: cur.close() - def query(self, sql: str, parameters: Optional[list] = None) -> Optional[Table]: + def query(self, sql, parameters=None): """ Execute a query against the database. Will return ``None`` if the query returns zero rows.
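A hedged sketch of the `copy` path shown above: create the table if needed, then stream the Parsons table in via `COPY ... FROM STDIN`. Credentials are assumed to come from the `PG*` environment variables or `~/.pgpass`; the table name is hypothetical.

```python
from parsons import Postgres, Table

pg = Postgres()  # PGUSER/PGPASSWORD/PGHOST/PGDATABASE, or ~/.pgpass

tbl = Table([{"id": 1, "city": "Tucson"}, {"id": 2, "city": "Mesa"}])

# strict_length=False rounds column widths up, leaving headroom for
# future, wider values.
pg.copy(tbl, "public.cities", if_exists="drop", strict_length=False)
```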
@@ -127,7 +122,7 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): with self.cursor(connection) as cursor: - logger.debug(f"SQL Query: {sql}") + logger.debug(f'SQL Query: {sql}') cursor.execute(sql, parameters) if commit: @@ -135,7 +130,7 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): # If the cursor is empty, don't cause an error if not cursor.description: - logger.debug("Query returned 0 rows") + logger.debug('Query returned 0 rows') return None else: @@ -146,7 +141,7 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): temp_file = files.create_temp_file() - with open(temp_file, "wb") as f: + with open(temp_file, 'wb') as f: # Grab the header header = [i[0] for i in cursor.description] pickle.dump(header, f) @@ -156,14 +151,14 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): if not batch: break - logger.debug(f"Fetched {len(batch)} rows.") + logger.debug(f'Fetched {len(batch)} rows.') for row in batch: pickle.dump(list(row), f) # Load a Table from the file final_tbl = Table(petl.frompickle(temp_file)) - logger.debug(f"Query returned {final_tbl.num_rows} rows.") + logger.debug(f'Query returned {final_tbl.num_rows} rows.') return final_tbl def _create_table_precheck(self, connection, table_name, if_exists): @@ -183,21 +178,21 @@ def _create_table_precheck(self, connection, table_name, if_exists): True if the table needs to be created, False otherwise. """ - if if_exists not in ["fail", "truncate", "append", "drop"]: + if if_exists not in ['fail', 'truncate', 'append', 'drop']: raise ValueError("Invalid value for `if_exists` argument") # If the table exists, evaluate the if_exists argument for next steps. if self.table_exists_with_connection(table_name, connection): - if if_exists == "fail": - raise ValueError("Table already exists.") + if if_exists == 'fail': + raise ValueError('Table already exists.') - if if_exists == "truncate": + if if_exists == 'truncate': truncate_sql = f"TRUNCATE TABLE {table_name};" logger.info(f"Truncating {table_name}.") self.query_with_connection(truncate_sql, connection, commit=False) - if if_exists == "drop": + if if_exists == 'drop': logger.info(f"Dropping {table_name}.") drop_sql = f"DROP TABLE {table_name};" self.query_with_connection(drop_sql, connection, commit=False) @@ -208,7 +203,7 @@ def _create_table_precheck(self, connection, table_name, if_exists): else: return True - def table_exists(self, table_name: str, view: bool = True) -> bool: + def table_exists(self, table_name, view=True): """ Check if a table or view exists in the database. @@ -216,7 +211,7 @@ def table_exists(self, table_name: str, view: bool = True) -> bool: table_name: str The table name and schema (e.g. ``myschema.mytable``). view: boolean - Check to see if a view exists by the same name. Defaults to ``True``. + Check to see if a view exists by the same name `Returns:` boolean @@ -230,7 +225,7 @@ def table_exists_with_connection(self, table_name, connection, view=True): # Extract the table and schema from this. If no schema is detected then # will default to the public schema. 
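Continuing the hypothetical `pg` instance from the sketch above, the schema handling works like this:

```python
# "schema.table" is split on the first dot; a bare table name is looked
# up in the "public" schema.
pg.table_exists("analytics.donors")  # schema "analytics", table "donors"
pg.table_exists("donors")            # schema "public", table "donors"
```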
try: - schema, table = table_name.lower().split(".", 1) + schema, table = table_name.lower().split('.', 1) except ValueError: schema, table = "public", table_name.lower() diff --git a/parsons/databases/postgres/postgres_create_statement.py b/parsons/databases/postgres/postgres_create_statement.py index 7fa4cd0a41..22424e4e1e 100644 --- a/parsons/databases/postgres/postgres_create_statement.py +++ b/parsons/databases/postgres/postgres_create_statement.py @@ -8,6 +8,7 @@ class PostgresCreateStatement(DatabaseCreateStatement): + def __init__(self): super().__init__() @@ -42,24 +43,15 @@ def _rename_reserved_word(self, col, index): """ return f"col_{index}" - def create_statement( - self, - tbl, - table_name, - padding=None, - distkey=None, - sortkey=None, - varchar_max=None, - varchar_truncate=True, - columntypes=None, - strict_length=True, - ): + def create_statement(self, tbl, table_name, padding=None, distkey=None, sortkey=None, + varchar_max=None, varchar_truncate=True, columntypes=None, + strict_length=True): # Generate a table create statement. Distkeys and sortkeys are only used by # Redshift and should not be passed when generating a create statement for # Postgres. if tbl.num_rows == 0: - raise ValueError("Table is empty. Must have 1 or more rows.") + raise ValueError('Table is empty. Must have 1 or more rows.') # Validate and rename column names if needed tbl.table = petl.setheader(tbl.table, self.column_name_validate(tbl.columns)) @@ -67,27 +59,27 @@ def create_statement( mapping = self.generate_data_types(tbl) if padding: - mapping["longest"] = self.vc_padding(mapping, padding) + mapping['longest'] = self.vc_padding(mapping, padding) elif not strict_length: - mapping["longest"] = self.vc_step(mapping) + mapping['longest'] = self.vc_step(mapping) if varchar_max: - mapping["longest"] = self.vc_max(mapping, varchar_max) + mapping['longest'] = self.vc_max(mapping, varchar_max) if varchar_truncate: - mapping["longest"] = self.vc_trunc(mapping) + mapping['longest'] = self.vc_trunc(mapping) - mapping["longest"] = self.vc_validate(mapping) + mapping['longest'] = self.vc_validate(mapping) # Add any provided column type overrides if columntypes: - for i in range(len(mapping["headers"])): - col = mapping["headers"][i] + for i in range(len(mapping['headers'])): + col = mapping['headers'][i] if columntypes.get(col): - mapping["type_list"][i] = columntypes[col] + mapping['type_list'][i] = columntypes[col] # Enclose in quotes - mapping["headers"] = [f'"{h}"' for h in mapping["headers"]] + mapping['headers'] = [f'"{h}"'for h in mapping['headers']] return self.create_sql(table_name, mapping, distkey=distkey, sortkey=sortkey) @@ -109,19 +101,19 @@ def generate_data_types(self, table): # Populate empty values for the columns for col in table.columns: longest.append(0) - type_list.append("") + type_list.append('') for row in cont: for i in range(len(row)): # NA is the csv null value - if type_list[i] == "varchar" or row[i] in ["NA", ""]: + if type_list[i] == 'varchar' or row[i] in ['NA', '']: pass else: var_type = self.data_type(row[i], type_list[i]) type_list[i] = var_type # Calculate width - width = len(str(row[i]).encode("utf-8")) + width = len(str(row[i]).encode('utf-8')) if width > longest[i]: longest[i] = width @@ -129,17 +121,19 @@ def generate_data_types(self, table): # If the entire column is either one of those (or a mix of the two) # the type will be empty. 
# Fill with a default varchar - type_list = [typ or "varchar" for typ in type_list] + type_list = [typ or 'varchar' for typ in type_list] - return {"longest": longest, "headers": table.columns, "type_list": type_list} + return {'longest': longest, + 'headers': table.columns, + 'type_list': type_list} def vc_padding(self, mapping, padding): # Pad the width of a varchar column - return [int(c + (c * padding)) for c in mapping["longest"]] + return [int(c + (c * padding)) for c in mapping['longest']] def vc_step(self, mapping): - return [self.round_longest(c) for c in mapping["longest"]] + return [self.round_longest(c) for c in mapping['longest']] def vc_max(self, mapping, columns): # Set the varchar width of a column to the maximum @@ -147,49 +141,47 @@ def vc_max(self, mapping, columns): for c in columns: try: - idx = mapping["headers"].index(c) - mapping["longest"][idx] = self.VARCHAR_MAX + idx = mapping['headers'].index(c) + mapping['longest'][idx] = self.VARCHAR_MAX except KeyError as error: - logger.error("Could not find column name provided.") + logger.error('Could not find column name provided.') raise error - return mapping["longest"] + return mapping['longest'] def vc_trunc(self, mapping): - return [ - self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping["longest"] - ] + return [self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping['longest']] def vc_validate(self, mapping): - return [1 if c == 0 else c for c in mapping["longest"]] + return [1 if c == 0 else c for c in mapping['longest']] def create_sql(self, table_name, mapping, distkey=None, sortkey=None): # Generate the sql to create the table - statement = f"create table {table_name} (" + statement = f'create table {table_name} (' - for i in range(len(mapping["headers"])): - if mapping["type_list"][i] == "varchar": - statement = (statement + "\n {} varchar({}),").format( - str(mapping["headers"][i]).lower(), str(mapping["longest"][i]) - ) + for i in range(len(mapping['headers'])): + if mapping['type_list'][i] == 'varchar': + statement = (statement + '\n {} varchar({}),').format(str(mapping['headers'][i]) + .lower(), + str(mapping['longest'][i])) else: - statement = (statement + "\n " + "{} {}" + ",").format( - str(mapping["headers"][i]).lower(), mapping["type_list"][i] - ) + statement = (statement + '\n ' + '{} {}' + ',').format(str(mapping['headers'][i]) + .lower(), + mapping['type_list'][i]) - statement = statement[:-1] + ") " + statement = statement[:-1] + ') ' if distkey: - statement += f"\ndistkey({distkey}) " + statement += f'\ndistkey({distkey}) ' if sortkey: - statement += f"\nsortkey({sortkey})" + statement += f'\nsortkey({sortkey})' - statement += ";" + statement += ';' return statement diff --git a/parsons/databases/redshift/__init__.py b/parsons/databases/redshift/__init__.py index 012fbf247b..20ba2798b9 100644 --- a/parsons/databases/redshift/__init__.py +++ b/parsons/databases/redshift/__init__.py @@ -1,3 +1,5 @@ from parsons.databases.redshift.redshift import Redshift -__all__ = ["Redshift"] +__all__ = [ + 'Redshift' +] diff --git a/parsons/databases/redshift/constants.py b/parsons/databases/redshift/constants.py index 6bad02f43d..9876799d12 100644 --- a/parsons/databases/redshift/constants.py +++ b/parsons/databases/redshift/constants.py @@ -1,6 +1,6 @@ COL_NAME_MAX_LEN = 120 -FLOAT = "float" +FLOAT = 'float' REPLACE_CHARS = {" ": ""} diff --git a/parsons/databases/redshift/redshift.py b/parsons/databases/redshift/redshift.py index a870d90a11..7eb20550b7 100644 --- 
a/parsons/databases/redshift/redshift.py +++ b/parsons/databases/redshift/redshift.py @@ -1,4 +1,3 @@ -from typing import List, Optional from parsons.etl.table import Table from parsons.databases.redshift.rs_copy_table import RedshiftCopyTable from parsons.databases.redshift.rs_create_table import RedshiftCreateTable @@ -7,7 +6,6 @@ from parsons.databases.table import BaseTable from parsons.databases.alchemy import Alchemy from parsons.utilities import files, sql_helpers -from parsons.databases.database_connector import DatabaseConnector import psycopg2 import psycopg2.extras import os @@ -27,14 +25,8 @@ logger = logging.getLogger(__name__) -class Redshift( - RedshiftCreateTable, - RedshiftCopyTable, - RedshiftTableUtilities, - RedshiftSchema, - Alchemy, - DatabaseConnector, -): +class Redshift(RedshiftCreateTable, RedshiftCopyTable, RedshiftTableUtilities, RedshiftSchema, + Alchemy): """ A Redshift class to connect to database. @@ -66,50 +58,27 @@ class Redshift( iam_role: str AWS IAM Role ARN string -- an optional, different way for credentials to be provided in the Redshift copy command that does not require an access key. - use_env_token: bool - Controls use of the ``AWS_SESSION_TOKEN`` environment variable for S3. Defaults - to ``True``. Set to ``False`` in order to ignore the ``AWS_SESSION_TOKEN`` environment - variable even if the ``aws_session_token`` argument was not passed in. """ - def __init__( - self, - username=None, - password=None, - host=None, - db=None, - port=None, - timeout=10, - s3_temp_bucket=None, - aws_access_key_id=None, - aws_secret_access_key=None, - iam_role=None, - use_env_token=True, - ): + def __init__(self, username=None, password=None, host=None, db=None, port=None, + timeout=10, s3_temp_bucket=None, + aws_access_key_id=None, aws_secret_access_key=None, iam_role=None): super().__init__() try: - self.username = username or os.environ["REDSHIFT_USERNAME"] - self.password = password or os.environ["REDSHIFT_PASSWORD"] - self.host = host or os.environ["REDSHIFT_HOST"] - self.db = db or os.environ["REDSHIFT_DB"] - self.port = port or os.environ["REDSHIFT_PORT"] + self.username = username or os.environ['REDSHIFT_USERNAME'] + self.password = password or os.environ['REDSHIFT_PASSWORD'] + self.host = host or os.environ['REDSHIFT_HOST'] + self.db = db or os.environ['REDSHIFT_DB'] + self.port = port or os.environ['REDSHIFT_PORT'] except KeyError as error: - logger.error( - "Connection info missing. Must include as kwarg or " "env variable." - ) + logger.error("Connection info missing. Must include as kwarg or " + "env variable.") raise error self.timeout = timeout - self.dialect = "redshift" - self.s3_temp_bucket = s3_temp_bucket or os.environ.get("S3_TEMP_BUCKET") - # Set prefix for temp S3 bucket paths that include subfolders - self.s3_temp_bucket_prefix = None - if self.s3_temp_bucket and "/" in self.s3_temp_bucket: - split_temp_bucket_name = self.s3_temp_bucket.split("/", 1) - self.s3_temp_bucket = split_temp_bucket_name[0] - self.s3_temp_bucket_prefix = split_temp_bucket_name[1] - self.use_env_token = use_env_token + self.dialect = 'redshift' + self.s3_temp_bucket = s3_temp_bucket or os.environ.get('S3_TEMP_BUCKET') # We don't check/load the environment variables for aws_* here # because the logic in S3() and rs_copy_table.py does already.
self.aws_access_key_id = aws_access_key_id @@ -132,14 +101,9 @@ def connection(self): """ # Create a psycopg2 connection and cursor - conn = psycopg2.connect( - user=self.username, - password=self.password, - host=self.host, - dbname=self.db, - port=self.port, - connect_timeout=self.timeout, - ) + conn = psycopg2.connect(user=self.username, password=self.password, + host=self.host, dbname=self.db, port=self.port, + connect_timeout=self.timeout) try: yield conn @@ -155,7 +119,7 @@ def cursor(self, connection): finally: cur.close() - def query(self, sql: str, parameters: Optional[list] = None) -> Optional[Table]: + def query(self, sql, parameters=None): """ Execute a query against the Redshift database. Will return ``None`` if the query returns zero rows. @@ -224,8 +188,9 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): # rows in the correct order with self.cursor(connection) as cursor: - if "credentials" not in sql: - logger.debug(f"SQL Query: {sql}") + + if 'credentials' not in sql: + logger.debug(f'SQL Query: {sql}') cursor.execute(sql, parameters) if commit: @@ -233,17 +198,18 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): # If the cursor is empty, don't cause an error if not cursor.description: - logger.debug("Query returned 0 rows") + logger.debug('Query returned 0 rows') return None else: + # Fetch the data in batches, and "pickle" the rows to a temp file. # (We pickle rather than writing to, say, a CSV, so that we maintain # all the type information for each field.) temp_file = files.create_temp_file() - with open(temp_file, "wb") as f: + with open(temp_file, 'wb') as f: # Grab the header header = [i[0] for i in cursor.description] pickle.dump(header, f) @@ -253,51 +219,25 @@ def query_with_connection(self, sql, connection, parameters=None, commit=True): if not batch: break - logger.debug(f"Fetched {len(batch)} rows.") + logger.debug(f'Fetched {len(batch)} rows.') for row in batch: pickle.dump(list(row), f) # Load a Table from the file final_tbl = Table(petl.frompickle(temp_file)) - logger.debug(f"Query returned {final_tbl.num_rows} rows.") + logger.debug(f'Query returned {final_tbl.num_rows} rows.') return final_tbl - def copy_s3( - self, - table_name, - bucket, - key, - manifest=False, - data_type="csv", - csv_delimiter=",", - compression=None, - if_exists="fail", - max_errors=0, - distkey=None, - sortkey=None, - padding=None, - varchar_max=None, - statupdate=True, - compupdate=True, - ignoreheader=1, - acceptanydate=True, - dateformat="auto", - timeformat="auto", - emptyasnull=True, - blanksasnull=True, - nullas=None, - acceptinvchars=True, - truncatecolumns=False, - columntypes=None, - specifycols=None, - aws_access_key_id=None, - aws_secret_access_key=None, - bucket_region=None, - strict_length=True, - template_table=None, - line_delimited=False, - ): + def copy_s3(self, table_name, bucket, key, manifest=False, data_type='csv', + csv_delimiter=',', compression=None, if_exists='fail', max_errors=0, + distkey=None, sortkey=None, padding=None, varchar_max=None, + statupdate=True, compupdate=True, ignoreheader=1, acceptanydate=True, + dateformat='auto', timeformat='auto', emptyasnull=True, + blanksasnull=True, nullas=None, acceptinvchars=True, truncatecolumns=False, + columntypes=None, specifycols=None, + aws_access_key_id=None, aws_secret_access_key=None, bucket_region=None, + strict_length=True, template_table=None): """ Copy a file from s3 to Redshift. 
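Before the `copy_s3` body below, a brief sketch of constructing the connector and loading a CSV that already sits in S3. All names are placeholders; in practice the five `REDSHIFT_*` environment variables (plus AWS credentials) would usually be set instead of passing kwargs.

```python
from parsons import Redshift

rs = Redshift(
    username="analyst",
    password="example-password",
    host="cluster.example.us-east-1.redshift.amazonaws.com",
    db="warehouse",
    port=5439,
)

# Load s3://my-bucket/exports/donations.csv, creating the table if needed.
rs.copy_s3(
    "schema.donations",
    bucket="my-bucket",
    key="exports/donations.csv",
    if_exists="drop",
)
```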
@@ -399,106 +339,58 @@ def copy_s3( """ with self.connection() as connection: + if self._create_table_precheck(connection, table_name, if_exists): if template_table: - sql = f"CREATE TABLE {table_name} (LIKE {template_table})" + sql = f'CREATE TABLE {table_name} (LIKE {template_table})' else: # Grab the object from s3 from parsons.aws.s3 import S3 - - s3 = S3( - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - use_env_token=self.use_env_token, - ) + s3 = S3(aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) local_path = s3.get_file(bucket, key) - if data_type == "csv": + if data_type == 'csv': tbl = Table.from_csv(local_path, delimiter=csv_delimiter) - elif data_type == "json": - tbl = Table.from_json(local_path, line_delimited=line_delimited) else: raise TypeError("Invalid data type provided") # Create the table - sql = self.create_statement( - tbl, - table_name, - padding=padding, - distkey=distkey, - sortkey=sortkey, - varchar_max=varchar_max, - columntypes=columntypes, - strict_length=strict_length, - ) + sql = self.create_statement(tbl, table_name, padding=padding, + distkey=distkey, sortkey=sortkey, + varchar_max=varchar_max, + columntypes=columntypes, + strict_length=strict_length) self.query_with_connection(sql, connection, commit=False) - logger.info(f"{table_name} created.") + logger.info(f'{table_name} created.') # Copy the table - logger.info(f"Data type is {data_type}") - copy_sql = self.copy_statement( - table_name, - bucket, - key, - manifest=manifest, - data_type=data_type, - csv_delimiter=csv_delimiter, - compression=compression, - max_errors=max_errors, - statupdate=statupdate, - compupdate=compupdate, - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - ignoreheader=ignoreheader, - acceptanydate=acceptanydate, - emptyasnull=emptyasnull, - blanksasnull=blanksasnull, - nullas=nullas, - acceptinvchars=acceptinvchars, - truncatecolumns=truncatecolumns, - specifycols=specifycols, - dateformat=dateformat, - timeformat=timeformat, - bucket_region=bucket_region, - ) + copy_sql = self.copy_statement(table_name, bucket, key, manifest=manifest, + data_type=data_type, csv_delimiter=csv_delimiter, + compression=compression, max_errors=max_errors, + statupdate=statupdate, compupdate=compupdate, + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key, + ignoreheader=ignoreheader, acceptanydate=acceptanydate, + emptyasnull=emptyasnull, blanksasnull=blanksasnull, + nullas=nullas, acceptinvchars=acceptinvchars, + truncatecolumns=truncatecolumns, + specifycols=specifycols, + dateformat=dateformat, timeformat=timeformat, + bucket_region=bucket_region) self.query_with_connection(copy_sql, connection, commit=False) - logger.info(f"Data copied to {table_name}.") - - def copy( - self, - tbl: Table, - table_name: str, - if_exists: str = "fail", - max_errors: int = 0, - distkey: Optional[str] = None, - sortkey: Optional[str] = None, - padding: Optional[float] = None, - statupdate: Optional[bool] = None, - compupdate: Optional[bool] = None, - acceptanydate: bool = True, - emptyasnull: bool = True, - blanksasnull: bool = True, - nullas: Optional[str] = None, - acceptinvchars: bool = True, - dateformat: str = "auto", - timeformat: str = "auto", - varchar_max: Optional[List[str]] = None, - truncatecolumns: bool = False, - columntypes: Optional[dict] = None, - specifycols: Optional[bool] = None, - alter_table: bool = False, - alter_table_cascade: bool = False, - 
aws_access_key_id: Optional[str] = None, - aws_secret_access_key: Optional[str] = None, - iam_role: Optional[str] = None, # Unused - Should we remove? - cleanup_s3_file: bool = True, - template_table: Optional[str] = None, - temp_bucket_region: Optional[str] = None, - strict_length: bool = True, - csv_encoding: str = "utf-8", - ): + logger.info(f'Data copied to {table_name}.') + + def copy(self, tbl, table_name, if_exists='fail', max_errors=0, distkey=None, + sortkey=None, padding=None, statupdate=None, compupdate=None, acceptanydate=True, + emptyasnull=True, blanksasnull=True, nullas=None, acceptinvchars=True, + dateformat='auto', timeformat='auto', varchar_max=None, truncatecolumns=False, + columntypes=None, specifycols=None, alter_table=False, alter_table_cascade=False, + aws_access_key_id=None, aws_secret_access_key=None, iam_role=None, + cleanup_s3_file=True, template_table=None, temp_bucket_region=None, + strict_length=True): """ Copy a :ref:`parsons-table` to Redshift. @@ -520,6 +412,9 @@ def copy( padding: float A percentage padding to add to varchar columns if creating a new table. This is helpful to add a buffer for future copies in which the data might be wider. + varchar_max: list + A list of columns in which to set the width of the varchar column to 65,535 + characters. statupdate: boolean Governs automatic computation and refresh of optimizer statistics at the end of a successful COPY command. If ``True`` explicitly sets ``statupdate`` to on, if @@ -557,9 +452,6 @@ def copy( Set the date format. Defaults to ``auto``. timeformat: str Set the time format. Defaults to ``auto``. - varchar_max: list - A list of columns in which to set the width of the varchar column to 65,535 - characters. truncatecolumns: boolean If the table already exists, truncates data in columns to the appropriate number of characters so that it fits the column specification. Applies only to columns @@ -607,10 +499,7 @@ def copy( in a different region from the temp bucket. strict_length: bool Whether or not to tightly fit the length of the table columns to the length - of the data in ``tbl``; if ``padding`` is specified, this argument is ignored. - csv_encoding: str - String encoding to use when writing the temporary CSV file that is uploaded to S3. - Defaults to 'utf-8'. + of the data in ``tbl``; if ``padding`` is specified, this argument is ignored `Returns` Parsons Table or ``None`` @@ -624,99 +513,70 @@ cols = None with self.connection() as connection: + # Check to see if the table exists. If it does not or if_exists = drop, then # create the new table. if self._create_table_precheck(connection, table_name, if_exists): if template_table: # Copy the schema from the template table - sql = f"CREATE TABLE {table_name} (LIKE {template_table})" + sql = f'CREATE TABLE {table_name} (LIKE {template_table})' else: - sql = self.create_statement( - tbl, - table_name, - padding=padding, - distkey=distkey, - sortkey=sortkey, - varchar_max=varchar_max, - columntypes=columntypes, - strict_length=strict_length, - ) + sql = self.create_statement(tbl, table_name, padding=padding, + distkey=distkey, sortkey=sortkey, + varchar_max=varchar_max, + columntypes=columntypes, + strict_length=strict_length) self.query_with_connection(sql, connection, commit=False) - logger.info(f"{table_name} created.") + logger.info(f'{table_name} created.') # If alter_table is True, then alter table if the table column widths # are wider than the existing table.
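As a usage sketch of the `copy` parameters documented above (hypothetical names; the table is staged through the S3 temp bucket, so `S3_TEMP_BUCKET` and AWS credentials are assumed to be configured):

```python
from parsons import Redshift, Table

rs = Redshift()  # REDSHIFT_* environment variables

tbl = Table([{"id": 1, "amount": 25}, {"id": 2, "amount": 50}])

# alter_table widens existing varchar columns when the incoming data
# is wider than the current table definition.
rs.copy(
    tbl,
    "schema.donations",
    if_exists="append",
    distkey="id",
    sortkey="id",
    alter_table=True,
)
```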
if alter_table: self.alter_varchar_column_widths( - tbl, table_name, drop_dependencies=alter_table_cascade - ) + tbl, table_name, drop_dependencies=alter_table_cascade) # Upload the table to S3 - key = self.temp_s3_copy( - tbl, - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - csv_encoding=csv_encoding, - ) + key = self.temp_s3_copy(tbl, aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) try: # Copy to Redshift database. - copy_args = { - "max_errors": max_errors, - "ignoreheader": 1, - "statupdate": statupdate, - "compupdate": compupdate, - "acceptanydate": acceptanydate, - "dateformat": dateformat, - "timeformat": timeformat, - "blanksasnull": blanksasnull, - "nullas": nullas, - "emptyasnull": emptyasnull, - "acceptinvchars": acceptinvchars, - "truncatecolumns": truncatecolumns, - "specifycols": cols, - "aws_access_key_id": aws_access_key_id, - "aws_secret_access_key": aws_secret_access_key, - "compression": "gzip", - "bucket_region": temp_bucket_region, - } + copy_args = {'max_errors': max_errors, + 'ignoreheader': 1, + 'statupdate': statupdate, + 'compupdate': compupdate, + 'acceptanydate': acceptanydate, + 'dateformat': dateformat, + 'timeformat': timeformat, + 'blanksasnull': blanksasnull, + 'nullas': nullas, + 'emptyasnull': emptyasnull, + 'acceptinvchars': acceptinvchars, + 'truncatecolumns': truncatecolumns, + 'specifycols': cols, + 'aws_access_key_id': aws_access_key_id, + 'aws_secret_access_key': aws_secret_access_key, + 'compression': 'gzip', + 'bucket_region': temp_bucket_region} # Copy from S3 to Redshift - sql = self.copy_statement( - table_name, self.s3_temp_bucket, key, **copy_args - ) + sql = self.copy_statement(table_name, self.s3_temp_bucket, key, **copy_args) sql_censored = sql_helpers.redact_credentials(sql) - logger.debug(f"Copy SQL command: {sql_censored}") + logger.debug(f'Copy SQL command: {sql_censored}') self.query_with_connection(sql, connection, commit=False) - logger.info(f"Data copied to {table_name}.") + logger.info(f'Data copied to {table_name}.') # Clean up the S3 bucket. finally: if key and cleanup_s3_file: self.temp_s3_delete(key) - def unload( - self, - sql, - bucket, - key_prefix, - manifest=True, - header=True, - delimiter="|", - compression="gzip", - add_quotes=True, - null_as=None, - escape=True, - allow_overwrite=True, - parallel=True, - max_file_size="6.2 GB", - extension=None, - aws_region=None, - aws_access_key_id=None, - aws_secret_access_key=None, - ): + def unload(self, sql, bucket, key_prefix, manifest=True, header=True, delimiter='|', + compression='gzip', add_quotes=True, null_as=None, escape=True, allow_overwrite=True, + parallel=True, max_file_size='6.2 GB', aws_region=None, aws_access_key_id=None, + aws_secret_access_key=None): """ Unload Redshift data to S3 Bucket. This is a more efficient method than running a query to export data as it can export in parallel and directly into an S3 bucket. Consider @@ -758,8 +618,6 @@ def unload( max_file_size: str The maximum size of files UNLOAD creates in Amazon S3. Specify a decimal value between 5 MB and 6.2 GB. - extension: str - This extension will be added to the end of file names loaded to S3 region: str The AWS Region where the target Amazon S3 bucket is located. 
REGION is required for UNLOAD to an Amazon S3 bucket that is not in the same AWS Region as the Amazon Redshift @@ -799,29 +657,19 @@ statement += "ESCAPE \n" if allow_overwrite: statement += "ALLOWOVERWRITE \n" - if extension: - statement += f"EXTENSION '{extension}' \n" if aws_region: statement += f"REGION {aws_region} \n" - logger.info(f"Unloading data to s3://{bucket}/{key_prefix}") + logger.info(f'Unloading data to s3://{bucket}/{key_prefix}') # Censor sensitive data statement_censored = sql_helpers.redact_credentials(statement) logger.debug(statement_censored) return self.query(statement) - def generate_manifest( - self, - buckets, - aws_access_key_id=None, - aws_secret_access_key=None, - mandatory=True, - prefix=None, - manifest_bucket=None, - manifest_key=None, - path=None, - ): + def generate_manifest(self, buckets, aws_access_key_id=None, aws_secret_access_key=None, + mandatory=True, prefix=None, manifest_bucket=None, manifest_key=None, + path=None): """ Given a list of S3 buckets, generate a manifest file (JSON format). A manifest file allows you to copy multiple files into a single table at once. Once the manifest is @@ -853,58 +701,44 @@ """ from parsons.aws import S3 - - s3 = S3( - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - use_env_token=self.use_env_token, - ) + s3 = S3(aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) # Deal with a single bucket being passed, rather than list. if isinstance(buckets, str): buckets = [buckets] # Generate manifest file - manifest = {"entries": []} + manifest = {'entries': []} for bucket in buckets: + # Retrieve list of files in bucket key_list = s3.list_keys(bucket, prefix=prefix) for key in key_list: - manifest["entries"].append( - {"url": "/".join(["s3:/", bucket, key]), "mandatory": mandatory} - ) + manifest['entries'].append({ + 'url': '/'.join(['s3:/', bucket, key]), + 'mandatory': mandatory + }) - logger.info("Manifest generated.") + logger.info('Manifest generated.') # Save the file to s3 bucket if provided if manifest_key and manifest_bucket: # Dump the manifest to a temp JSON file manifest_path = files.create_temp_file() - with open(manifest_path, "w") as manifest_file_obj: + with open(manifest_path, 'w') as manifest_file_obj: json.dump(manifest, manifest_file_obj, sort_keys=True, indent=4) # Upload the file to S3 s3.put_file(manifest_bucket, manifest_key, manifest_path) - logger.info(f"Manifest saved to s3://{manifest_bucket}/{manifest_key}") + logger.info(f'Manifest saved to s3://{manifest_bucket}/{manifest_key}') return manifest - def upsert( - self, - table_obj, - target_table, - primary_key, - vacuum=True, - distinct_check=True, - cleanup_temp_table=True, - alter_table=True, - alter_table_cascade=False, - from_s3=False, - distkey=None, - sortkey=None, - **copy_args, - ): + def upsert(self, table_obj, target_table, primary_key, vacuum=True, distinct_check=True, + cleanup_temp_table=True, alter_table=True, alter_table_cascade=False, + from_s3=False, distkey=None, sortkey=None, **copy_args): """ Perform an upsert on an existing table. An upsert is a function in which rows in a table are updated and inserted at the same time. @@ -952,28 +786,24 @@ sortkey = sortkey or primary_key if not self.table_exists(target_table): - logger.info( - "Target table does not exist. Copying into newly \ - created target table." - ) + logger.info('Target table does not exist. 
Copying into newly \ + created target table.') self.copy(table_obj, target_table, distkey=distkey, sortkey=sortkey) return None if alter_table and table_obj: # Make target table column widths match incoming table, if necessary - self.alter_varchar_column_widths( - table_obj, target_table, drop_dependencies=alter_table_cascade - ) + self.alter_varchar_column_widths(table_obj, target_table, + drop_dependencies=alter_table_cascade) - noise = f"{random.randrange(0, 10000):04}"[:4] - date_stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M") + noise = f'{random.randrange(0, 10000):04}'[:4] + date_stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M') # Generate a temp table like "table_tmp_20200210_1230_14212" - staging_tbl = "{}_stg_{}_{}".format(target_table, date_stamp, noise) + staging_tbl = '{}_stg_{}_{}'.format(target_table, date_stamp, noise) if distinct_check: - primary_keys_statement = ", ".join(primary_keys) - diff = self.query( - f""" + primary_keys_statement = ', '.join(primary_keys) + diff = self.query(f''' select ( select count(*) from {target_table} @@ -983,16 +813,16 @@ def upsert( from {target_table} ) ) as total_count - """ - ).first + ''').first if diff > 0: - raise ValueError("Primary key column contains duplicate values.") + raise ValueError('Primary key column contains duplicate values.') with self.connection() as connection: + try: # Copy to a staging table - logger.info(f"Building staging table: {staging_tbl}") - if "compupdate" not in copy_args: + logger.info(f'Building staging table: {staging_tbl}') + if 'compupdate' not in copy_args: # Especially with a lot of columns, compupdate=True can # cause a lot of processing/analysis by Redshift before upload. # Since this is a temporary table, setting compression for each @@ -1003,30 +833,29 @@ def upsert( if from_s3: if table_obj is not None: raise ValueError( - "upsert(... from_s3=True) requires the first argument (table_obj)" - " to be None. from_s3 and table_obj are mutually exclusive." + 'upsert(... from_s3=True) requires the first argument (table_obj)' + ' to be None. from_s3 and table_obj are mutually exclusive.' 
) - self.copy_s3(staging_tbl, template_table=target_table, **copy_args) + self.copy_s3(staging_tbl, + template_table=target_table, + **copy_args) else: - self.copy( - table_obj, - staging_tbl, - template_table=target_table, - alter_table=False, # We just did our own alter table above - distkey=distkey, - sortkey=sortkey, - **copy_args, - ) + self.copy(table_obj, staging_tbl, + template_table=target_table, + alter_table=False, # We just did our own alter table above + distkey=distkey, + sortkey=sortkey, + **copy_args) - staging_table_name = staging_tbl.split(".")[1] - target_table_name = target_table.split(".")[1] + staging_table_name = staging_tbl.split('.')[1] + target_table_name = target_table.split('.')[1] # Delete rows comparisons = [ - f"{staging_table_name}.{primary_key} = {target_table_name}.{primary_key}" + f'{staging_table_name}.{primary_key} = {target_table_name}.{primary_key}' for primary_key in primary_keys ] - where_clause = " and ".join(comparisons) + where_clause = ' and '.join(comparisons) sql = f""" DELETE FROM {target_table} @@ -1034,7 +863,7 @@ def upsert( WHERE {where_clause} """ self.query_with_connection(sql, connection, commit=False) - logger.debug(f"Target rows deleted from {target_table}.") + logger.debug(f'Target rows deleted from {target_table}.') # Insert rows # ALTER TABLE APPEND would be more efficient, but you can't run it in a @@ -1046,22 +875,21 @@ def upsert( """ self.query_with_connection(sql, connection, commit=False) - logger.info(f"Target rows inserted to {target_table}") + logger.info(f'Target rows inserted to {target_table}') finally: if cleanup_temp_table: # Drop the staging table - self.query_with_connection( - f"DROP TABLE IF EXISTS {staging_tbl};", connection, commit=False - ) - logger.info(f"{staging_tbl} staging table dropped.") + self.query_with_connection(f"DROP TABLE IF EXISTS {staging_tbl};", + connection, commit=False) + logger.info(f'{staging_tbl} staging table dropped.') # Vacuum table. You must commit when running this type of transaction. 
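# Illustrative sketch of the upsert flow above, assuming "id" is a true
# primary key; the table and column names are placeholders. Matching rows
# are deleted from the target and re-inserted from the staging table within
# one transaction, after which the target is vacuumed.
from parsons import Redshift, Table

rs = Redshift()
updates = Table([{"id": 1, "status": "active"}, {"id": 2, "status": "lapsed"}])
rs.upsert(updates, "schema.members", primary_key="id", vacuum=True)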
if vacuum: with self.connection() as connection: connection.set_session(autocommit=True) - self.query_with_connection(f"VACUUM {target_table};", connection) - logger.info(f"{target_table} vacuumed.") + self.query_with_connection(f'VACUUM {target_table};', connection) + logger.info(f'{target_table} vacuumed.') def drop_dependencies_for_cols(self, schema, table, cols): fmt_cols = ", ".join([f"'{c}'" for c in cols]) @@ -1091,11 +919,9 @@ def drop_dependencies_for_cols(self, schema, table, cols): with self.connection() as connection: connection.set_session(autocommit=True) tbl = self.query_with_connection(sql_depend, connection) - dropped_views = [row["table_name"] for row in tbl] + dropped_views = [row['table_name'] for row in tbl] if dropped_views: - sql_drop = "\n".join( - [f"drop view {view} CASCADE;" for view in dropped_views] - ) + sql_drop = "\n".join([f"drop view {view};" for view in dropped_views]) tbl = self.query_with_connection(sql_drop, connection) logger.info(f"Dropped the following views: {dropped_views}") @@ -1125,32 +951,19 @@ def alter_varchar_column_widths(self, tbl, table_name, drop_dependencies=False): # Determine the max width of the varchar columns in the Redshift table s, t = self.split_full_table_name(table_name) cols = self.get_columns(s, t) - rc = { - k: v["max_length"] - for k, v in cols.items() - if v["data_type"] == "character varying" - } # noqa: E501, E261 + rc = {k: v['max_length'] for k, v in cols.items() if v['data_type'] == 'character varying'} # noqa: E501, E261 + if drop_dependencies: + self.drop_dependencies_for_cols(s, t, rc.keys()) # Figure out if any of the destination table varchar columns are smaller than the # associated Parsons table columns. If they are, then alter column types to expand # their width. for c in set(rc.keys()).intersection(set(pc.keys())): - if rc[c] < pc[c] and rc[c] != 65535: - logger.info(f"{c} not wide enough. Expanding column width.") - # If requested size is larger than Redshift will allow, - # automatically set to Redshift's max varchar width - new_size = 65535 - if pc[c] < new_size: - new_size = pc[c] - if drop_dependencies: - self.drop_dependencies_for_cols(s, t, [c]) - self.alter_table_column_type( - table_name, c, "varchar", varchar_width=new_size - ) - - def alter_table_column_type( - self, table_name, column_name, data_type, varchar_width=None - ): + if rc[c] < pc[c]: + logger.info(f'{c} not wide enough. Expanding column width.') + self.alter_table_column_type(table_name, c, 'varchar', varchar_width=pc[c]) + + def alter_table_column_type(self, table_name, column_name, data_type, varchar_width=None): """ Alter a column type of an existing table. 
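# Hypothetical call to the method documented above; the table and column
# names are placeholders. Per the implementation, it runs the ALTER TABLE
# statement on a connection with autocommit enabled.
from parsons import Redshift

rs = Redshift()
rs.alter_table_column_type("schema.members", "notes", "varchar", varchar_width=1024)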
@@ -1172,7 +985,7 @@ def alter_table_column_type( with self.connection() as connection: connection.set_session(autocommit=True) self.query_with_connection(sql, connection) - logger.info(f"Altered {table_name} {column_name}.") + logger.info(f'Altered {table_name} {column_name}.') def table(self, table_name): # Return a Redshift table object diff --git a/parsons/databases/redshift/rs_copy_table.py b/parsons/databases/redshift/rs_copy_table.py index 7b4fa578b7..33ceb254cc 100644 --- a/parsons/databases/redshift/rs_copy_table.py +++ b/parsons/databases/redshift/rs_copy_table.py @@ -9,43 +9,25 @@ class RedshiftCopyTable(object): + aws_access_key_id = None aws_secret_access_key = None iam_role = None - def __init__(self, use_env_token=True): - self.use_env_token = use_env_token - - def copy_statement( - self, - table_name, - bucket, - key, - manifest=False, - data_type="csv", - csv_delimiter=",", - max_errors=0, - statupdate=None, - compupdate=None, - ignoreheader=1, - acceptanydate=True, - dateformat="auto", - timeformat="auto", - emptyasnull=True, - blanksasnull=True, - nullas=None, - acceptinvchars=True, - truncatecolumns=False, - specifycols=None, - aws_access_key_id=None, - aws_secret_access_key=None, - compression=None, - bucket_region=None, - json_option="auto", - ): - logger.info(f"Data type is {data_type}") + def __init__(self): + + pass + + def copy_statement(self, table_name, bucket, key, manifest=False, + data_type='csv', csv_delimiter=',', max_errors=0, + statupdate=None, compupdate=None, ignoreheader=1, acceptanydate=True, + dateformat='auto', timeformat='auto', emptyasnull=True, + blanksasnull=True, nullas=None, acceptinvchars=True, truncatecolumns=False, + specifycols=None, aws_access_key_id=None, aws_secret_access_key=None, + compression=None, bucket_region=None): + # Source / Destination - source = f"s3://{bucket}/{key}" + source = f's3://{bucket}/{key}' # Add column list for mapping or if there are fewer columns on source file col_list = f"({', '.join(specifycols)})" if specifycols is not None else "" @@ -60,9 +42,7 @@ def copy_statement( sql += "manifest \n" if bucket_region: sql += f"region '{bucket_region}'\n" - logger.info( - "Copying data from S3 bucket %s in region %s", bucket, bucket_region - ) + logger.info('Copying data from S3 bucket %s in region %s', bucket, bucket_region) sql += f"maxerror {max_errors} \n" # Redshift has some default behavior when statupdate is left out @@ -99,21 +79,20 @@ def copy_statement( sql += "truncatecolumns \n" # Data Type - if data_type == "csv": + if data_type == 'csv': sql += f"csv delimiter '{csv_delimiter}' \n" - elif data_type == "json": - sql += f"json '{json_option}' \n" else: - raise TypeError("Invalid data type specified.") + raise TypeError('Invalid data type specified.') - if compression == "gzip": - sql += "gzip \n" + if compression == 'gzip': + sql += 'gzip \n' - sql += ";" + sql += ';' return sql def get_creds(self, aws_access_key_id, aws_secret_access_key): + if aws_access_key_id and aws_secret_access_key: # When we have credentials, then we don't need to set them again pass @@ -123,63 +102,52 @@ def get_creds(self, aws_access_key_id, aws_secret_access_key): return f"credentials 'aws_iam_role={self.iam_role}'\n" elif self.aws_access_key_id and self.aws_secret_access_key: + aws_access_key_id = self.aws_access_key_id aws_secret_access_key = self.aws_secret_access_key - elif ( - "AWS_ACCESS_KEY_ID" in os.environ and "AWS_SECRET_ACCESS_KEY" in os.environ - ): - aws_access_key_id = os.environ["AWS_ACCESS_KEY_ID"] - 
aws_secret_access_key = os.environ["AWS_SECRET_ACCESS_KEY"] + elif 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ: + + aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID'] + aws_secret_access_key = os.environ['AWS_SECRET_ACCESS_KEY'] else: - s3 = S3(use_env_token=self.use_env_token) + + s3 = S3() creds = s3.aws.session.get_credentials() aws_access_key_id = creds.access_key aws_secret_access_key = creds.secret_key return "credentials 'aws_access_key_id={};aws_secret_access_key={}'\n".format( - aws_access_key_id, aws_secret_access_key - ) - - def temp_s3_copy( - self, - tbl, - aws_access_key_id=None, - aws_secret_access_key=None, - csv_encoding="utf-8", - ): + aws_access_key_id, + aws_secret_access_key) + + def temp_s3_copy(self, tbl, aws_access_key_id=None, aws_secret_access_key=None): + if not self.s3_temp_bucket: - raise KeyError( - ( - "Missing S3_TEMP_BUCKET, needed for transferring data to Redshift. " - "Must be specified as env vars or kwargs" - ) - ) + raise KeyError(("Missing S3_TEMP_BUCKET, needed for transferring data to Redshift. " + "Must be specified as env vars or kwargs" + )) # Coalesce S3 Key arguments aws_access_key_id = aws_access_key_id or self.aws_access_key_id aws_secret_access_key = aws_secret_access_key or self.aws_secret_access_key - self.s3 = S3( - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - use_env_token=self.use_env_token, - ) + self.s3 = S3(aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) hashed_name = hash(time.time()) key = f"{S3_TEMP_KEY_PREFIX}/{hashed_name}.csv.gz" - if self.s3_temp_bucket_prefix: - key = self.s3_temp_bucket_prefix + "/" + key # Convert table to compressed CSV file, to optimize the transfers to S3 and to # Redshift. - local_path = tbl.to_csv(temp_file_compression="gzip", encoding=csv_encoding) + local_path = tbl.to_csv(temp_file_compression='gzip') # Copy table to bucket self.s3.put_file(self.s3_temp_bucket, key, local_path) return key def temp_s3_delete(self, key): + if key: self.s3.remove_file(self.s3_temp_bucket, key) diff --git a/parsons/databases/redshift/rs_create_table.py b/parsons/databases/redshift/rs_create_table.py index 7d37a81240..0cc79b80e9 100644 --- a/parsons/databases/redshift/rs_create_table.py +++ b/parsons/databases/redshift/rs_create_table.py @@ -8,6 +8,7 @@ class RedshiftCreateTable(DatabaseCreateStatement): + def __init__(self): super().__init__() @@ -40,20 +41,12 @@ def _rename_reserved_word(self, col, index): """ return f"col_{index}" - def create_statement( - self, - tbl, - table_name, - padding=None, - distkey=None, - sortkey=None, - varchar_max=None, - varchar_truncate=True, - columntypes=None, - strict_length=True, - ): + def create_statement(self, tbl, table_name, padding=None, distkey=None, sortkey=None, + varchar_max=None, varchar_truncate=True, columntypes=None, + strict_length=True): + # Warn the user if they don't provide a DIST key or a SORT key - self._log_key_warning(distkey=distkey, sortkey=sortkey, method="copy") + self._log_key_warning(distkey=distkey, sortkey=sortkey, method='copy') # Generate a table create statement @@ -61,32 +54,32 @@ def create_statement( tbl.table = petl.setheader(tbl.table, self.column_name_validate(tbl.columns)) if tbl.num_rows == 0: - raise ValueError("Table is empty. Must have 1 or more rows.") + raise ValueError('Table is empty. 
Must have 1 or more rows.') mapping = self.generate_data_types(tbl) if padding: - mapping["longest"] = self.vc_padding(mapping, padding) + mapping['longest'] = self.vc_padding(mapping, padding) elif not strict_length: - mapping["longest"] = self.vc_step(mapping) + mapping['longest'] = self.vc_step(mapping) if varchar_max: - mapping["longest"] = self.vc_max(mapping, varchar_max) + mapping['longest'] = self.vc_max(mapping, varchar_max) if varchar_truncate: - mapping["longest"] = self.vc_trunc(mapping) + mapping['longest'] = self.vc_trunc(mapping) - mapping["longest"] = self.vc_validate(mapping) + mapping['longest'] = self.vc_validate(mapping) # Add any provided column type overrides if columntypes: - for i in range(len(mapping["headers"])): - col = mapping["headers"][i] + for i in range(len(mapping['headers'])): + col = mapping['headers'][i] if columntypes.get(col): - mapping["type_list"][i] = columntypes[col] + mapping['type_list'][i] = columntypes[col] # Enclose in quotes - mapping["headers"] = ['"{}"'.format(h) for h in mapping["headers"]] + mapping['headers'] = ['"{}"'.format(h) for h in mapping['headers']] return self.create_sql(table_name, mapping, distkey=distkey, sortkey=sortkey) @@ -108,125 +101,116 @@ def generate_data_types(self, table): # Populate empty values for the columns for col in table.columns: longest.append(0) - type_list.append("") + type_list.append('') for row in cont: for i in range(len(row)): # NA is the csv null value - if type_list[i] == "varchar" or row[i] in ["NA", ""]: + if type_list[i] == 'varchar' or row[i] in ['NA', '']: pass else: var_type = self.data_type(row[i], type_list[i]) type_list[i] = var_type # Calculate width - if len(str(row[i]).encode("utf-8")) > longest[i]: - longest[i] = len(str(row[i]).encode("utf-8")) + if len(str(row[i]).encode('utf-8')) > longest[i]: + longest[i] = len(str(row[i]).encode('utf-8')) # In L138 'NA' and '' will be skipped # If the entire column is either one of those (or a mix of the two) # the type will be empty. 
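# Condensed sketch of the scan in generate_data_types() above: widths are
# measured in UTF-8 bytes, 'NA' and '' are skipped for type inference, and a
# column that never resolves to a type falls back to varchar. The real
# data_type() helper is replaced here with a crude stand-in.
rows = [["1", "NA"], ["22", "hello"]]
longest = [0, 0]
type_list = ["", ""]
for row in rows:
    for i, val in enumerate(row):
        if type_list[i] != "varchar" and val not in ("NA", ""):
            type_list[i] = "int" if val.isdigit() else "varchar"  # stand-in for data_type()
        longest[i] = max(longest[i], len(str(val).encode("utf-8")))
type_list = [typ or "varchar" for typ in type_list]  # e.g. ['int', 'varchar']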
# Fill with a default varchar - type_list = [typ or "varchar" for typ in type_list] + type_list = [typ or 'varchar' for typ in type_list] - return {"longest": longest, "headers": table.columns, "type_list": type_list} + return {'longest': longest, + 'headers': table.columns, + 'type_list': type_list} def vc_padding(self, mapping, padding): # Pad the width of a varchar column - return [int(c + (c * padding)) for c in mapping["longest"]] + return [int(c + (c * padding)) for c in mapping['longest']] def vc_step(self, mapping): - return [self.round_longest(c) for c in mapping["longest"]] + return [self.round_longest(c) for c in mapping['longest']] def vc_max(self, mapping, columns): # Set the varchar width of a column to the maximum for c in columns: + try: - idx = mapping["headers"].index(c) - mapping["longest"][idx] = self.VARCHAR_MAX + idx = mapping['headers'].index(c) + mapping['longest'][idx] = self.VARCHAR_MAX except KeyError as error: - logger.error("Could not find column name provided.") + logger.error('Could not find column name provided.') raise error - return mapping["longest"] + return mapping['longest'] def vc_trunc(self, mapping): - return [ - self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping["longest"] - ] + + return [self.VARCHAR_MAX if c > self.VARCHAR_MAX else c for c in mapping['longest']] def vc_validate(self, mapping): - return [1 if c == 0 else c for c in mapping["longest"]] + + return [1 if c == 0 else c for c in mapping['longest']] def create_sql(self, table_name, mapping, distkey=None, sortkey=None): # Generate the sql to create the table - statement = "create table {} (".format(table_name) + statement = 'create table {} ('.format(table_name) - for i in range(len(mapping["headers"])): - if mapping["type_list"][i] == "varchar": - statement = (statement + "\n {} varchar({}),").format( - str(mapping["headers"][i]).lower(), str(mapping["longest"][i]) - ) + for i in range(len(mapping['headers'])): + if mapping['type_list'][i] == 'varchar': + statement = (statement + '\n {} varchar({}),').format(str(mapping['headers'][i]) + .lower(), + str(mapping['longest'][i])) else: - statement = (statement + "\n " + "{} {}" + ",").format( - str(mapping["headers"][i]).lower(), mapping["type_list"][i] - ) + statement = (statement + '\n ' + '{} {}' + ',').format(str(mapping['headers'][i]) + .lower(), + mapping['type_list'][i]) - statement = statement[:-1] + ") " + statement = statement[:-1] + ') ' if distkey: - statement += "\ndistkey({}) ".format(distkey) + statement += '\ndistkey({}) '.format(distkey) if sortkey and isinstance(sortkey, list): - statement += "\ncompound sortkey(" - statement += ", ".join(sortkey) - statement += ")" + statement += '\ncompound sortkey(' + statement += ', '.join(sortkey) + statement += ')' elif sortkey: - statement += "\nsortkey({})".format(sortkey) + statement += '\nsortkey({})'.format(sortkey) - statement += ";" + statement += ';' return statement # This is for backwards compatibility def column_name_validate(self, columns): - return self.format_columns(columns, col_prefix="col_") + return self.format_columns( + columns, col_prefix="col_") @staticmethod - def _log_key_warning(distkey=None, sortkey=None, method=""): + def _log_key_warning(distkey=None, sortkey=None, method=''): # Log a warning message advising the user about DIST and SORT keys if distkey and sortkey: return keys = [ - ( - distkey, - "DIST", - "https://aws.amazon.com/about-aws/whats-new/2019/08/amazon-redshift-" -
"now-recommends-distribution-keys-for-improved-query-performance/", - ), - ( - sortkey, - "SORT", - "https://docs.amazonaws.cn/en_us/redshift/latest/dg/c_best-practices-" - "sort-key.html", - ), + (distkey, "DIST", "https://aws.amazon.com/about-aws/whats-new/2019/08/amazon-redshift-" + "now-recommends-distribution-keys-for-improved-query-performance/"), + (sortkey, "SORT", "https://docs.amazonaws.cn/en_us/redshift/latest/dg/c_best-practices-" + "sort-key.html") ] - warning = "".join( - [ - "You didn't provide a {} key to method `parsons.redshift.Redshift.{}`.\n" - "You can learn about best practices here:\n{}.\n".format( - keyname, method, keyinfo - ) - for key, keyname, keyinfo in keys - if not key - ] - ) + warning = "".join([ + "You didn't provide a {} key to method `parsons.redshift.Redshift.{}`.\n" + "You can learn about best practices here:\n{}.\n".format( + keyname, method, keyinfo + ) for key, keyname, keyinfo in keys if not key]) warning += "You may be able to further optimize your queries." diff --git a/parsons/databases/redshift/rs_schema.py b/parsons/databases/redshift/rs_schema.py index aed3feec19..77fc14f0cc 100644 --- a/parsons/databases/redshift/rs_schema.py +++ b/parsons/databases/redshift/rs_schema.py @@ -1,4 +1,5 @@ class RedshiftSchema(object): + def schema_exists(self, schema): sql = f"select * from pg_namespace where nspname = '{schema}'" res = self.query(sql) @@ -18,13 +19,13 @@ def create_schema_with_permissions(self, schema, group=None): The type of permissions to grant. Supports `select`, `all`, etc. (For full list, see the `Redshift GRANT docs `_) - """ # noqa: E501,E261 + """ # noqa: E501,E261 if not self.schema_exists(schema): self.query(f"create schema {schema}") self.query(f"grant usage on schema {schema} to group {group}") - def grant_schema_permissions(self, schema, group, permissions_type="select"): + def grant_schema_permissions(self, schema, group, permissions_type='select'): """ Grants a Redshift group permissions to all tables within an existing schema. @@ -37,7 +38,7 @@ def grant_schema_permissions(self, schema, group, permissions_type="select"): The type of permissions to grant. Supports `select`, `all`, etc. (For full list, see the `Redshift GRANT docs `_) - """ # noqa: E501,E261 + """ # noqa: E501,E261 sql = f""" grant usage on schema {schema} to group {group}; diff --git a/parsons/databases/redshift/rs_table_utilities.py b/parsons/databases/redshift/rs_table_utilities.py index ae0c8a5c71..70c20e23ef 100644 --- a/parsons/databases/redshift/rs_table_utilities.py +++ b/parsons/databases/redshift/rs_table_utilities.py @@ -1,15 +1,15 @@ import logging - # import pkgutil logger = logging.getLogger(__name__) class RedshiftTableUtilities(object): + def __init__(self): pass - def table_exists(self, table_name: str, view: bool = True) -> bool: + def table_exists(self, table_name, view=True): """ Check if a table or view exists in the database. 
@@ -27,14 +27,11 @@ def table_exists_with_connection(self, table_name, connection, view=True): return self.table_exists_with_connection(table_name, connection, view) def table_exists_with_connection(self, table_name, connection, view=True): - table_name = table_name.lower().split(".") - table_name = [x.strip() for x in table_name] + table_name = table_name.lower().split('.') # Check in pg tables for the table sql = """select count(*) from pg_tables where schemaname='{}' and - tablename='{}';""".format( - table_name[0], table_name[1] - ) + tablename='{}';""".format(table_name[0], table_name[1]) # TODO maybe convert these queries to use self.query_with_connection @@ -46,19 +43,17 @@ def table_exists_with_connection(self, table_name, connection, view=True): # Check in the pg_views for the table if view: sql = """select count(*) from pg_views where schemaname='{}' and - viewname='{}';""".format( - table_name[0], table_name[1] - ) + viewname='{}';""".format(table_name[0], table_name[1]) cursor.execute(sql) result += cursor.fetchone()[0] # If in either, return boolean if result >= 1: - logger.debug(f"{table_name[0]}.{table_name[1]} exists.") + logger.debug(f'{table_name[0]}.{table_name[1]} exists.') return True else: - logger.debug(f"{table_name[0]}.{table_name[1]} does NOT exist.") + logger.debug(f'{table_name[0]}.{table_name[1]} does NOT exist.') return False def get_row_count(self, table_name): @@ -79,7 +74,7 @@ def get_row_count(self, table_name): """ count_query = self.query(f"select count(*) from {table_name}") - return count_query[0]["count"] + return count_query[0]['count'] def rename_table(self, table_name, new_table_name): """ @@ -117,7 +112,7 @@ def move_table(self, source_table, new_table, drop_source_table=False): Drop the source table. Returns: None - """ # noqa: E501,E261 + """ # noqa: E501,E261 # To Do: Add the grants # To Do: Argument for if the table exists? @@ -126,7 +121,7 @@ def move_table(self, source_table, new_table, drop_source_table=False): create_sql = f"create table {new_table} (like {source_table});" alter_sql = f"alter table {new_table} append from {source_table}" - logger.info(f"Creating empty {new_table} from {source_table}.") + logger.info(f'Creating empty {new_table} from {source_table}.') self.query(create_sql) with self.connection() as conn: @@ -136,14 +131,14 @@ def move_table(self, source_table, new_table, drop_source_table=False): # the connection must be set to autocommit. conn.set_session(autocommit=True) - logger.info(f"Moving data from {source_table} to {new_table}.") + logger.info(f'Moving data from {source_table} to {new_table}.') self.query_with_connection(alter_sql, conn) if drop_source_table: self.query(f"drop table {source_table};") - logger.info(f"{source_table} dropped.") + logger.info(f'{source_table} dropped.') - logger.info(f"{source_table} data moved to {new_table}.") + logger.info(f'{source_table} data moved to {new_table}.') def _create_table_precheck(self, connection, table_name, if_exists): """ @@ -162,20 +157,20 @@ def _create_table_precheck(self, connection, table_name, if_exists): True if the table needs to be created, False otherwise.
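# Sketch of the if_exists strategies enforced by the precheck above, via a
# hypothetical populate_table_from_query() call; names are invented.
# 'fail' raises if the table exists, 'truncate' empties it, 'append' keeps
# existing rows, and 'drop' recreates it.
from parsons import Redshift

rs = Redshift()
rs.populate_table_from_query(
    "select * from schema.members where status = 'active'",
    "schema.active_members",
    if_exists="drop",
)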
""" - if if_exists not in ["fail", "truncate", "append", "drop"]: + if if_exists not in ['fail', 'truncate', 'append', 'drop']: raise ValueError("Invalid value for `if_exists` argument") exists = self.table_exists_with_connection(table_name, connection) - if exists and if_exists in ["fail", "truncate", "append"]: - if if_exists == "fail": - raise ValueError("Table already exists.") - elif if_exists == "truncate": + if exists and if_exists in ['fail', 'truncate', 'append']: + if if_exists == 'fail': + raise ValueError('Table already exists.') + elif if_exists == 'truncate': truncate_sql = f"truncate table {table_name}" self.query_with_connection(truncate_sql, connection, commit=False) else: - if exists and if_exists == "drop": + if exists and if_exists == 'drop': logger.debug(f"Table {table_name} exist, will drop...") drop_sql = f"drop table {table_name};\n" self.query_with_connection(drop_sql, connection, commit=False) @@ -184,9 +179,8 @@ def _create_table_precheck(self, connection, table_name, if_exists): return False - def populate_table_from_query( - self, query, destination_table, if_exists="fail", distkey=None, sortkey=None - ): + def populate_table_from_query(self, query, destination_table, if_exists='fail', distkey=None, + sortkey=None): """ Populate a Redshift table with the results of a SQL query, creating the table if it doesn't yet exist. @@ -205,9 +199,7 @@ def populate_table_from_query( The column to use as the sortkey for the table. """ with self.connection() as conn: - should_create = self._create_table_precheck( - conn, destination_table, if_exists - ) + should_create = self._create_table_precheck(conn, destination_table, if_exists) if should_create: logger.info(f"Creating table {destination_table} from query...") @@ -223,16 +215,10 @@ def populate_table_from_query( self.query_with_connection(sql, conn, commit=False) - logger.info(f"{destination_table} created from query") + logger.info(f'{destination_table} created from query') - def duplicate_table( - self, - source_table, - destination_table, - where_clause="", - if_exists="fail", - drop_source_table=False, - ): + def duplicate_table(self, source_table, destination_table, where_clause='', + if_exists='fail', drop_source_table=False): """ Create a copy of an existing table (or subset of rows) in a new table. It will inherit encoding, sortkey and distkey. 
@@ -252,12 +238,10 @@ def duplicate_table( """ with self.connection() as conn: - should_create = self._create_table_precheck( - conn, destination_table, if_exists - ) + should_create = self._create_table_precheck(conn, destination_table, if_exists) if should_create: - logger.info(f"Creating {destination_table} from {source_table}...") + logger.info(f'Creating {destination_table} from {source_table}...') create_sql = f"create table {destination_table} (like {source_table})" self.query_with_connection(create_sql, conn, commit=False) @@ -267,11 +251,11 @@ def duplicate_table( self.query_with_connection(insert_sql, conn, commit=False) if drop_source_table: - logger.info(f"Dropping table {source_table}...") + logger.info(f'Dropping table {source_table}...') drop_sql = f"drop table {source_table}" self.query_with_connection(drop_sql, conn, commit=False) - logger.info(f"{destination_table} created from {source_table}.") + logger.info(f'{destination_table} created from {source_table}.') def union_tables(self, new_table_name, tables, union_all=True, view=False): """ @@ -406,12 +390,12 @@ def get_columns(self, schema, table_name): """ return { - row["column_name"]: { - "data_type": row["data_type"], - "max_length": row["max_length"], - "max_precision": row["max_precision"], - "max_scale": row["max_scale"], - "is_nullable": row["is_nullable"] == "YES", + row['column_name']: { + 'data_type': row['data_type'], + 'max_length': row['max_length'], + 'max_precision': row['max_precision'], + 'max_scale': row['max_scale'], + 'is_nullable': row['is_nullable'] == 'YES', } for row in self.query(query) } @@ -473,7 +457,7 @@ def get_queries(self): See :ref:`parsons-table` for output options. """ - logger.info("Retrieving running and queued queries.") + logger.info('Retrieving running and queued queries.') # Lifted from Redshift Utils https://github.com/awslabs/amazon-redshift-utils/blob/master/src/AdminScripts/running_queues.sql # noqa: E501 sql = """ @@ -529,9 +513,7 @@ def get_max_value(self, table_name, value_column): The column containing the values """ - return self.query(f"SELECT MAX({value_column}) value from {table_name}")[0][ - "value" - ] + return self.query(f'SELECT MAX({value_column}) value from {table_name}')[0]['value'] def get_object_type(self, object_name): """ @@ -566,7 +548,7 @@ def get_object_type(self, object_name): logger.info(f"{object_name} doesn't exist.") return None - return tbl[0]["object_name"] + return tbl[0]['object_name'] def is_view(self, object_name): """ @@ -643,8 +625,8 @@ def get_table_definitions(self, schema=None, table=None): if table: conditions.append(f"tablename like '{table}'") - conditions_str = " and ".join(conditions) - where_clause = f"where {conditions_str}" if conditions_str else "" + conditions_str = ' and '.join(conditions) + where_clause = f"where {conditions_str}" if conditions_str else '' # ddl_query = pkgutil.get_data( # __name__, "queries/v_generate_tbl_ddl.sql").decode() @@ -660,16 +642,16 @@ def get_table_definitions(self, schema=None, table=None): return None def join_sql_parts(columns, rows): - return [f"{columns[1]}.{columns[2]}", "\n".join([row[4] for row in rows])] + return [f"{columns[1]}.{columns[2]}", + '\n'.join([row[4] for row in rows])] # The query returns the sql over multiple rows # We need to join then into a single row ddl_table.reduce_rows( - ["table_id", "schemaname", "tablename"], + ['table_id', 'schemaname', 'tablename'], join_sql_parts, - ["tablename", "ddl"], - presorted=True, - ) + ['tablename', 'ddl'], + presorted=True) 
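# Toy version of the join_sql_parts reduction above: the DDL query emits one
# row per SQL fragment, and grouping by table re-assembles each statement.
# The fragment rows here are invented for illustration.
import petl

fragments = [
    ("table_id", "schemaname", "tablename", "seq", "ddl"),
    (1, "public", "members", 1, "create table public.members ("),
    (1, "public", "members", 2, "  id int);"),
]
joined = petl.rowreduce(
    fragments,
    key=("table_id", "schemaname", "tablename"),
    reducer=lambda key, rows: [f"{key[1]}.{key[2]}", "\n".join(r[4] for r in rows)],
    header=("tablename", "ddl"),
)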
return ddl_table.to_dicts() @@ -718,8 +700,8 @@ def get_view_definitions(self, schema=None, view=None): if view: conditions.append(f"g.viewname like '{view}'") - conditions_str = " and ".join(conditions) - where_clause = f"where {conditions_str}" if conditions_str else "" + conditions_str = ' and '.join(conditions) + where_clause = f"where {conditions_str}" if conditions_str else '' # ddl_query = pkgutil.get_data( # __name__, "queries/v_generate_view_ddl.sql").decode() diff --git a/parsons/databases/table.py b/parsons/databases/table.py index a6b2f7464c..9bbe7d959b 100644 --- a/parsons/databases/table.py +++ b/parsons/databases/table.py @@ -32,14 +32,12 @@ def max_primary_key(self, primary_key): Get the maximum primary key in the table. """ - return self.db.query( - f""" + return self.db.query(f""" SELECT {primary_key} FROM {self.table} ORDER BY {primary_key} DESC LIMIT 1 - """ - ).first + """).first def distinct_primary_key(self, primary_key): """ @@ -127,7 +125,7 @@ def get_new_rows(self, primary_key, cutoff_value, offset=0, chunk_size=None): where_clause = f"WHERE {primary_key} > %s" parameters = [cutoff_value] else: - where_clause = "" + where_clause = '' parameters = [] sql = f""" @@ -150,17 +148,17 @@ def drop(self, cascade=False): Drop the table. """ - sql = f"DROP TABLE {self.table}" + sql = f'DROP TABLE {self.table}' if cascade: - sql += " CASCADE" + sql += ' CASCADE' self.db.query(sql) - logger.info(f"{self.table} dropped.") + logger.info(f'{self.table} dropped.') def truncate(self): """ Truncate the table. """ - self.db.query(f"TRUNCATE TABLE {self.table}") - logger.info(f"{self.table} truncated.") + self.db.query(f'TRUNCATE TABLE {self.table}') + logger.info(f'{self.table} truncated.') diff --git a/parsons/donorbox/__init__.py b/parsons/donorbox/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/parsons/donorbox/donorbox.py b/parsons/donorbox/donorbox.py deleted file mode 100644 index 917a0ba156..0000000000 --- a/parsons/donorbox/donorbox.py +++ /dev/null @@ -1,227 +0,0 @@ -from parsons.utilities.api_connector import APIConnector -from parsons.utilities import check_env -from parsons import Table - -import logging -import datetime - -logger = logging.getLogger(__name__) - -URI = "https://donorbox.org/api/v1" - - -class Donorbox(object): - """ - Instantiate Donorbox class. - - `Args:` - donorbox_account_email: str - The email associated with your Donorbox account. Can be passed as - argument or set as ``DONORBOX_ACCOUNT_EMAIL`` environment variable. - donorbox_api_key: str - The API key generated by Donorbox for your account. Can be passed as - argument or set as ``DONORBOX_API_KEY`` environment variable. - """ - - def __init__(self, email=None, api_key=None): - self.email = check_env.check("DONORBOX_ACCOUNT_EMAIL", email) - self.api_key = check_env.check("DONORBOX_API_KEY", api_key) - self.uri = URI - self.client = APIConnector(self.uri, auth=(self.email, self.api_key)) - - def get_campaigns(self, **kwargs): - """ - Get information on campaigns. - - `Args:` - id: int or str - Optional. The ID of the campaign to get. If both id and name are omitted, returns - all campaigns. - name: str - Optional. The name of the campaign to get. If both id and name are omitted, returns - all campaigns. - order: str - Optional. Valid values are "asc" and "desc". If not supplied, order is descending - by default. - page: int - Optional. Donorbox supports pagination for larger results. Use the page to track - your progress. - per_page: int - Optional. 
Results per page when using pagination. Default is 50, maximum is 100. - - `Returns`: - Parsons Table - """ - result = self.client.request("campaigns", "GET", params=kwargs) - data = result.json() - return Table(data) - - def get_donations(self, **kwargs): - """ - Get information on donations. - - `Args:` - email: str - Optional. Filters donations by donor's email - date_from: str - | Optional. Filters donations to those started on or after the provided date. - | Valid formats: YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY - | If an incorrectly formatted date is provided, an error is raised. - date_to: str - | Optional. Filters donations to those started before the provided date. - | Valid formats: YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY - | If an incorrectly formatted date is provided, an error is raised. - campaign_name: str - Optional. Filters by the campaign title that you have defined in Donorbox. - campaign_id: int or str - Optional. Filters by Donorbox campaign id. - donation_id: int or str - Optional. Filters by Donorbox donations id. - first_name: str - Optional. Filters by donor's first name. - last_name: str - Optional. Filters by donor's last name. - donor_id: int or str - Optional. Filters by Donorbox donor id. - amount_min: int or str - Optional. Gets all donations above the provided minimum. - amount_max: int or str - Optional. Gets all donations below the provided maximum. - order: str - Optional. Valid values are "asc" and "desc". If not supplied, order is - descending by default. - page: int - Optional. Donorbox supports pagination for larger results. Use the page - to track your progress. - per_page: int - Optional. Results per page when using pagination. Default is 50, maximum - is 100. - - `Returns`: - Parsons Table - """ - # switch variable names - if "amount_max" in kwargs: - kwargs["amount[usd][max]"] = kwargs.pop("amount_max") - if "amount_min" in kwargs: - kwargs["amount[usd][min]"] = kwargs.pop("amount_min") - if "donation_id" in kwargs: - kwargs["id"] = kwargs.pop("donation_id") - self._check_date_helper(kwargs) - data = self.client.get_request("donations", params=kwargs) - return Table(data) - - def get_donors(self, **kwargs): - """ - Get information on donors. - - `Args:` - - donor_id: str or int - Optional. Filters by donor ID. - first_name: str - Optional. Filters by donor's first name. - last_name: str - Optional. Filters by donor's last name. - donor_name: str - Optional. Filters by donor's full name - email: str - Optional. Filters donors by donor's email - order: str - Optional. Valid values are "asc" and "desc". If not supplied, order is descending - by default. - page: int - Optional. Donorbox supports pagination for larger results. Use the page to track - your progress. - per_page: int - Optional. Results per page when using pagination. Default is 50, maximum is 100. - - `Returns`: - Parsons Table - """ - if "donor_id" in kwargs: - kwargs["id"] = kwargs.pop( - "donor_id" - ) # switch to Donorbox's (less specific) name - data = self.client.get_request("donors", params=kwargs) - return Table(data) - - def get_plans(self, **kwargs): - """ - Get information on plans. - - `Args:` - - email: str - Optional. Filters plans by donor's email address. - date_from: str - | Optional. Filters plans to those started on or after the provided date. - | Valid formats: YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY - | If an incorrectly formatted date is provided, an error is raised. - date_to: str - | Optional. Filters plans to those started before the provided date.
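# Hypothetical usage of the Donorbox connector whose removal is recorded
# above; credentials come from the documented environment variables, and
# dates must use one of the formats the date helper accepts.
from parsons import Donorbox

donorbox = Donorbox()  # DONORBOX_ACCOUNT_EMAIL / DONORBOX_API_KEY from env
recent = donorbox.get_donations(date_from="2022-01-01", amount_min=25)
plans = donorbox.get_plans(campaign_id=12345)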
- | Valid formats: YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY - | If an incorrectly formatted date is provided, an error is raised. - campaign_id: int or str - Optional. Filters by Donorbox campaign id. - campaign_name: str - Optional. Filters by the campaign title that you have defined in Donorbox. - donor_id: str or int - Optional. Filters by donor ID. - first_name: str - Optional. Filters by donor's first name. - last_name: str - Optional. Filters by donor's last name. - donor_name: str - Optional. Filter by donor's full name - order: str - Optional. Valid values are "asc" and "desc". If not supplied, order is descending - by default. - page: int - Optional. Donorbox supports pagination for larger results. Use the page to track - your progress. - per_page: int - Optional. Results per page when using pagination. Default is 50, maximum is 100. - - `Returns`: - Parsons Table - """ - self._check_date_helper(kwargs) - data = self.client.get_request("plans", params=kwargs) - return Table(data) - - def _check_date_helper(self, params): - """Searches through params for a date parameter and if found, calls format helper. - - params: dictionary - Required. Dictionary of parameters to be passed to endpoint. - - `Returns`: None - - """ - if "date_from" in params and params["date_from"] is not None: - self._date_format_helper(params["date_from"]) - if "date_to" in params and params["date_to"] is not None: - self._date_format_helper(params["date_to"]) - - def _date_format_helper(self, date_string): - """Checks date format and warns if invalid (internal) - - Valid formats: YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY - - date_string: str - Required. Date in a string format to be checked against Donorbox's valid options. - - `Returns`: None - """ - valid_formats = ["%Y-%m-%d", "%d-%m-%Y", "%Y/%m/%d", "%Y%m%d"] - for str_format in valid_formats: - try: - datetime.datetime.strptime(date_string, str_format) - return - except ValueError: - continue - raise ValueError( - f"The date you supplied, {date_string}, is not a valid Donorbox format." 
- + "Try the following formats: YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY" - ) diff --git a/parsons/etl/__init__.py b/parsons/etl/__init__.py index 59e433f2ea..c828e8f09b 100644 --- a/parsons/etl/__init__.py +++ b/parsons/etl/__init__.py @@ -2,4 +2,8 @@ from parsons.etl.tofrom import ToFrom from parsons.etl.etl import ETL -__all__ = ["ETL", "Table", "ToFrom"] +__all__ = [ + 'ETL', + 'Table', + 'ToFrom' +] diff --git a/parsons/etl/etl.py b/parsons/etl/etl.py index 5a1452f2b8..39df7a5044 100644 --- a/parsons/etl/etl.py +++ b/parsons/etl/etl.py @@ -1,16 +1,16 @@ -import logging - import petl +import logging logger = logging.getLogger(__name__) class ETL(object): + def __init__(self): pass - def add_column(self, column, value=None, index=None, if_exists="fail"): + def add_column(self, column, value=None, index=None): """ Add a column to your table @@ -21,19 +21,12 @@ def add_column(self, column, value=None, index=None, if_exists="fail"): A fixed or calculated value index: int The position of the new column in the table - if_exists: str (options: 'fail', 'replace') - If set `replace`, this function will call `fill_column` - if the column already exists, rather than raising a `ValueError` `Returns:` `Parsons Table` and also updates self """ if column in self.columns: - if if_exists == "replace": - self.fill_column(column, value) - return self - else: - raise ValueError(f"Column {column} already exists") + raise ValueError(f"Column {column} already exists") self.table = self.table.addfield(column, value, index) @@ -88,9 +81,8 @@ def fill_column(self, column_name, fill_value): """ if callable(fill_value): - self.table = petl.convert( - self.table, column_name, lambda _, r: fill_value(r), pass_row=True - ) + self.table = petl.convert(self.table, column_name, lambda _, r: fill_value(r), + pass_row=True) else: self.table = petl.update(self.table, column_name, fill_value) @@ -110,20 +102,11 @@ def fillna_column(self, column_name, fill_value): """ if callable(fill_value): - self.table = petl.convert( - self.table, - column_name, - lambda _, r: fill_value(r), - where=lambda r: r[column_name] is None, - pass_row=True, - ) + self.table = petl.convert(self.table, column_name, lambda _, r: fill_value(r), + where=lambda r: r[column_name] is None, pass_row=True) else: - self.table = petl.update( - self.table, - column_name, - fill_value, - where=lambda r: r[column_name] is None, - ) + self.table = petl.update(self.table, column_name, fill_value, + where=lambda r: r[column_name] is None) return self @@ -178,8 +161,8 @@ def get_column_max_width(self, column): for v in petl.values(self.table, column): - if len(str(v).encode("utf-8")) > max_width: - max_width = len(str(v).encode("utf-8")) + if len(str(v).encode('utf-8')) > max_width: + max_width = len(str(v).encode('utf-8')) return max_width @@ -198,16 +181,11 @@ def convert_columns_to_str(self): cols = self.get_columns_type_stats() - def str_or_empty(x): - if x is None: - return "" - return str(x) - for col in cols: # If there's more than one type (or no types), convert to str # Also if there is one type and it's not str, convert to str - if len(col["type"]) != 1 or col["type"][0] != "str": - self.convert_column(col["name"], str_or_empty) + if len(col['type']) != 1 or col['type'][0] != 'str': + self.convert_column(col['name'], str) return self @@ -229,7 +207,6 @@ def coalesce_columns(self, dest_column, source_columns, remove_source_columns=Tr """ if dest_column in self.columns: - def convert_fn(value, row): for source_col in source_columns: if 
row.get(source_col): @@ -239,7 +216,6 @@ def convert_fn(value, row): self.convert_column(dest_column, convert_fn, pass_row=True) else: - def add_fn(row): for source_col in source_columns: if row.get(source_col): @@ -287,7 +263,7 @@ def map_columns(self, column_map, exact_match=True): for col in self.columns: if not exact_match: - cleaned_col = col.lower().replace("_", "").replace(" ", "") + cleaned_col = col.lower().replace('_', '').replace(' ', '') else: cleaned_col = col @@ -340,8 +316,8 @@ def map_and_coalesce_columns(self, column_map): # if the key from the mapping dict already exists in the table, # rename it so it can be coalesced with other possible columns if key in self.columns: - self.rename_column(key, f"{key}_temp") - coalesce_list.insert(0, f"{key}_temp") + self.rename_column(key, f'{key}_temp') + coalesce_list.insert(0, f'{key}_temp') # coalesce columns self.coalesce_columns(key, coalesce_list, remove_source_columns=True) @@ -374,10 +350,8 @@ def get_columns_type_stats(self): A list of dicts, each containing a column 'name' and a 'type' list """ - return [ - {"name": col, "type": self.get_column_types(col)} - for col in self.table.columns() - ] + return [{'name': col, 'type': self.get_column_types(col)} + for col in self.table.columns()] def convert_table(self, *args): """ @@ -397,16 +371,9 @@ def convert_table(self, *args): return self - def unpack_dict( - self, - column, - keys=None, - include_original=False, - sample_size=5000, - missing=None, - prepend=True, - prepend_value=None, - ): + def unpack_dict(self, column, keys=None, include_original=False, + sample_size=5000, missing=None, prepend=True, + prepend_value=None): """ Unpack dictionary values from one column into separate columns @@ -435,28 +402,18 @@ def unpack_dict( prepend_value = column self.table = petl.convert( - self.table, column, lambda v: self._prepend_dict(v, prepend_value) - ) + self.table, + column, + lambda v: self._prepend_dict(v, prepend_value)) self.table = petl.unpackdict( - self.table, - column, - keys=keys, - includeoriginal=include_original, - samplesize=sample_size, - missing=missing, - ) + self.table, column, keys=keys, includeoriginal=include_original, + samplesize=sample_size, missing=missing) return self - def unpack_list( - self, - column, - include_original=False, - missing=None, - replace=False, - max_columns=None, - ): + def unpack_list(self, column, include_original=False, missing=None, replace=False, + max_columns=None): """ Unpack list values from one column into separate columns. Numbers the columns. @@ -513,15 +470,10 @@ def unpack_list( # Create new column names "COL_01, COL_02" new_cols = [] for i in range(col_count): - new_cols.append(column + "_" + str(i)) + new_cols.append(column + '_' + str(i)) - tbl = petl.unpack( - self.table, - column, - new_cols, - include_original=include_original, - missing=missing, - ) + tbl = petl.unpack(self.table, column, new_cols, + include_original=include_original, missing=missing) if replace: self.table = tbl @@ -529,7 +481,7 @@ def unpack_list( else: return tbl - def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False): + def unpack_nested_columns_as_rows(self, column, key='id', expand_original=False): """ Unpack list or dict values from one column into separate rows. Not recommended for JSON columns (i.e. 
lists of dicts), but can handle columns @@ -551,11 +503,7 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) """ if isinstance(expand_original, int) and expand_original is not True: - lengths = { - len(row[column]) - for row in self - if isinstance(row[column], (dict, list)) - } + lengths = {len(row[column]) for row in self if isinstance(row[column], (dict, list))} max_len = sorted(lengths, reverse=True)[0] if max_len > expand_original: expand_original = False @@ -567,9 +515,7 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) else: # Otherwise, include only key and column, but keep all non-dict types in table_list table = self.cut(key, column) - table_list = table.select_rows( - lambda row: not isinstance(row[column], dict) - ) + table_list = table.select_rows(lambda row: not isinstance(row[column], dict)) # All the columns other than column to ignore while melting ignore_cols = table.columns @@ -580,8 +526,8 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) # Rename the columns to retain only the number for col in table_list.columns: - if f"{column}_" in col: - table_list.rename_column(col, col.replace(f"{column}_", "")) + if f'{column}_' in col: + table_list.rename_column(col, col.replace(f'{column}_', "")) # Filter dicts and unpack as separate columns table_dict = table.select_rows(lambda row: isinstance(row[column], dict)) @@ -593,11 +539,11 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) melted_list = Table(petl.melt(table_list.table, ignore_cols)) melted_dict = Table(petl.melt(table_dict.table, ignore_cols)) - melted_list.remove_null_rows("value") - melted_dict.remove_null_rows("value") + melted_list.remove_null_rows('value') + melted_dict.remove_null_rows('value') - melted_list.rename_column("variable", column) - melted_dict.rename_column("variable", column) + melted_list.rename_column('variable', column) + melted_dict.rename_column('variable', column) # Combine the list and dict Tables melted_list.concat(melted_dict) @@ -606,22 +552,19 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) if expand_original: # Add unpacked rows to the original table (minus packed rows) - orig = self.select_rows( - lambda row: not isinstance(row[column], (dict, list)) - ) + orig = self.select_rows(lambda row: not isinstance(row[column], (dict, list))) orig.concat(melted_list) # Add unique id column by hashing all the other fields - if "uid" not in self.columns: - orig.add_column( - "uid", - lambda row: hashlib.md5( - str.encode("".join([str(x) for x in row])) - ).hexdigest(), - ) - orig.move_column("uid", 0) + if 'uid' not in self.columns: + orig.add_column('uid', lambda row: hashlib.md5( + str.encode( + ''.join([str(x) for x in row]) + ) + ).hexdigest()) + orig.move_column('uid', 0) # Rename value column in case this is done again to this Table - orig.rename_column("value", f"{column}_value") + orig.rename_column('value', f'{column}_value') # Keep column next to column_value orig.move_column(column, -1) @@ -629,27 +572,19 @@ def unpack_nested_columns_as_rows(self, column, key="id", expand_original=False) else: orig = self.remove_column(column) # Add unique id column by hashing all the other fields - melted_list.add_column( - "uid", - lambda row: hashlib.md5( - str.encode("".join([str(x) for x in row])) - ).hexdigest(), - ) - melted_list.move_column("uid", 0) + melted_list.add_column('uid', lambda row: hashlib.md5( + str.encode( + 
''.join([str(x) for x in row]) + ) + ).hexdigest()) + melted_list.move_column('uid', 0) output = melted_list self = orig return output - def long_table( - self, - key, - column, - key_rename=None, - retain_original=False, - prepend=True, - prepend_value=None, - ): + def long_table(self, key, column, key_rename=None, retain_original=False, + prepend=True, prepend_value=None): """ Create a new long parsons table from a column, including the foreign key. @@ -704,8 +639,8 @@ def long_table( lt = self.cut(*key, column) # Create a table of key and column lt.unpack_list(column, replace=True) # Unpack the list lt.table = petl.melt(lt.table, key) # Melt into a long table - lt = lt.cut(*key, "value") # Get rid of column names created in unpack - lt.rename_column("value", column) # Rename 'value' to old column name + lt = lt.cut(*key, 'value') # Get rid of column names created in unpack + lt.rename_column('value', column) # Rename 'value' to old column name lt.remove_null_rows(column) # Remove null values # If a new key name is specified, rename @@ -802,7 +737,7 @@ def _prepend_dict(self, dict_obj, prepend): for k, v in dict_obj.items(): - new_dict[prepend + "_" + k] = v + new_dict[prepend + '_' + k] = v return new_dict @@ -864,11 +799,7 @@ def chunk(self, rows): """ from parsons.etl import Table - - return [ - Table(petl.rowslice(self.table, i, i + rows)) - for i in range(0, self.num_rows, rows) - ] + return [Table(petl.rowslice(self.table, i, i+rows)) for i in range(0, self.num_rows, rows)] @staticmethod def get_normalized_column_name(column_name): @@ -882,15 +813,10 @@ def get_normalized_column_name(column_name): """ column_name = column_name.lower().strip() - return "".join(c for c in column_name if c.isalnum()) - - def match_columns( - self, - desired_columns, - fuzzy_match=True, - if_extra_columns="remove", - if_missing_columns="add", - ): + return ''.join(c for c in column_name if c.isalnum()) + + def match_columns(self, desired_columns, fuzzy_match=True, if_extra_columns='remove', + if_missing_columns='add'): """ Changes the column names and ordering in this Table to match a list of desired column names. @@ -917,13 +843,11 @@ def match_columns( from parsons.etl import Table # Just trying to avoid recursive imports. 
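# Illustrative match_columns() call per the docstring above: with
# fuzzy_match=True, headers are normalized (lowercased, non-alphanumerics
# stripped) before comparison; "phone" is missing, so by default it is added.
from parsons import Table

tbl = Table([{"First Name": "Ana", "eMail": "a@example.org"}])
tbl.match_columns(["first_name", "email", "phone"])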
- normalize_fn = ( - Table.get_normalized_column_name if fuzzy_match else (lambda s: s) - ) + normalize_fn = Table.get_normalized_column_name if fuzzy_match else (lambda s: s) # Create a mapping of our "normalized" name to the original column name current_columns_normalized = { - normalize_fn(col): col for col in reversed(self.columns) + normalize_fn(col): col for col in self.columns } # Track any columns we need to add to our current table from our desired columns @@ -942,22 +866,20 @@ def match_columns( # Try to find our desired column in our Table if normalized_desired not in current_columns_normalized: # If we can't find our desired column in our current columns, then it's "missing" - if if_missing_columns == "fail": + if if_missing_columns == 'fail': # If our missing strategy is to fail, raise an exception raise TypeError(f"Table is missing column {desired_column}") - elif if_missing_columns == "add": + elif if_missing_columns == 'add': # We have to add to our table columns_to_add.append(desired_column) # We will need to remember this column when we cut down to desired columns cut_columns.append(desired_column) # This will be in the final table final_header.append(desired_column) - elif if_missing_columns != "ignore": + elif if_missing_columns != 'ignore': # If it's not ignore, add, or fail, then it's not a valid strategy - raise TypeError( - f"Invalid option {if_missing_columns} for " - "argument `if_missing_columns`" - ) + raise TypeError(f"Invalid option {if_missing_columns} for " + "argument `if_missing_columns`") else: # We have found this in our current columns, so take it out of our list to search current_column = current_columns_normalized.pop(normalized_desired) @@ -969,20 +891,18 @@ def match_columns( # Look for any "extra" columns from our current table that aren't in our desired columns for current_column in current_columns_normalized.values(): # Figure out what to do with our "extra" columns - if if_extra_columns == "fail": + if if_extra_columns == 'fail': # If our missing strategy is to fail, raise an exception raise TypeError(f"Table has extra column {current_column}") - elif if_extra_columns == "ignore": + elif if_extra_columns == 'ignore': # If we're "ignore"ing our extra columns, we should keep them by adding them to # our intermediate and final columns list cut_columns.append(current_column) final_header.append(current_column) - elif if_extra_columns != "remove": + elif if_extra_columns != 'remove': # If it's not ignore, add, or fail, then it's not a valid strategy - raise TypeError( - f"Invalid option {if_extra_columns} for " - "argument `if_extra_columns`" - ) + raise TypeError(f"Invalid option {if_extra_columns} for " + "argument `if_extra_columns`") # Add any columns we need to add for column in columns_to_add: @@ -996,7 +916,8 @@ def match_columns( return self - def reduce_rows(self, columns, reduce_func, headers, presorted=False, **kwargs): + def reduce_rows(self, columns, reduce_func, headers, presorted=False, + **kwargs): """ Group rows by a column or columns, then reduce the groups to a single row. 
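# Small sketch of reduce_rows() as documented above: group on "group" and
# sum "amount"; the reducer receives the key value and the grouped rows.
from parsons import Table

tbl = Table([["group", "amount"], ["a", 1], ["a", 2], ["b", 5]])
tbl.reduce_rows(
    "group",
    lambda key, rows: [key, sum(row[1] for row in rows)],
    ["group", "total"],
)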
@@ -1062,7 +983,7 @@ def reduce_rows(self, columns, reduce_func, headers, presorted=False, **kwargs): `Returns:` `Parsons Table` and also updates self - """ # noqa: E501,E261 + """ # noqa: E501,E261 self.table = petl.rowreduce( self.table, @@ -1070,8 +991,7 @@ def reduce_rows(self, columns, reduce_func, headers, presorted=False, **kwargs): reduce_func, header=headers, presorted=presorted, - **kwargs, - ) + **kwargs) return self @@ -1155,8 +1075,8 @@ def use_petl(self, petl_method, *args, **kwargs): `Returns:` `parsons.Table` or `petl` table """ # noqa: E501 - update_table = kwargs.pop("update_table", False) - to_petl = kwargs.pop("to_petl", False) + update_table = kwargs.pop('update_table', False) + to_petl = kwargs.pop('to_petl', False) if update_table: self.table = getattr(petl, petl_method)(self.table, *args, **kwargs) @@ -1167,93 +1087,3 @@ def use_petl(self, petl_method, *args, **kwargs): from parsons.etl.table import Table return Table(getattr(petl, petl_method)(self.table, *args, **kwargs)) - - def deduplicate(self, keys=None, presorted=False): - """ - Deduplicates table based on an optional ``keys`` argument, - which can contain any number of keys or None. - - Method considers all keys specified in the ``keys`` argument - when deduplicating, not each key individually. For example, - if ``keys=['a', 'b']``, the method will not remove a record - unless it's identical to another record in both columns ``a`` and ``b``. - - .. code-block:: python - - >>> tbl = Table([['a', 'b'], [1, 3], [1, 2], [1, 2], [2, 3]]) - >>> tbl.table - +---+---+ - | a | b | - +===+===+ - | 1 | 3 | - +---+---+ - | 1 | 2 | - +---+---+ - | 1 | 2 | - +---+---+ - | 2 | 3 | - +---+---+ - - >>> tbl.deduplicate('a') - >>> # removes all subsequent rows with {'a': 1} - >>> tbl.table - +---+---+ - | a | b | - +===+===+ - | 1 | 3 | - +---+---+ - | 2 | 3 | - +---+---+ - - >>> tbl = Table([['a', 'b'], [1, 3], [1, 2], [1, 2], [2, 3]]) # reset - >>> tbl.deduplicate(['a', 'b']) - >>> # sorted on both ('a', 'b') so (1, 2) was placed before (1, 3) - >>> # did not remove second instance of {'a': 1} or {'b': 3} - >>> tbl.table - +---+---+ - | a | b | - +===+===+ - | 1 | 2 | - +---+---+ - | 1 | 3 | - +---+---+ - | 2 | 3 | - +---+---+ - - - >>> tbl = Table([['a', 'b'], [1, 3], [1, 2], [1, 2], [2, 3]]) # reset - >>> tbl.deduplicate('a').deduplicate('b') - >>> # can chain method to sort/dedupe on 'a', then sort/dedupe on 'b' - >>> tbl.table - +---+---+ - | a | b | - +===+===+ - | 1 | 3 | - +---+---+ - - >>> tbl = Table([['a', 'b'], [1, 3], [1, 2], [1, 2], [2, 3]]) # reset - >>> tbl.deduplicate('b').deduplicate('a') - >>> # Order DOES matter when deduping on one column at a time - >>> tbl.table - +---+---+ - | a | b | - +===+===+ - | 1 | 2 | - +---+---+ - - `Args:` - keys: str or list[str] or None - keys to deduplicate (and optionally sort) on. - presorted: bool - If false, the row will be sorted. 
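# The deduplicate() method removed above is a thin wrapper around petl's
# distinct(); an equivalent call on the underlying table, using the same
# sample data as the deleted docstring:
import petl
from parsons import Table

tbl = Table([["a", "b"], [1, 3], [1, 2], [1, 2], [2, 3]])
tbl.table = petl.transform.dedup.distinct(tbl.table, key="a", presorted=False)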
-        `Returns`:
-            `Parsons Table` and also updates self
-
-        """
-
-        deduped = petl.transform.dedup.distinct(
-            self.table, key=keys, presorted=presorted
-        )
-        self.table = deduped
-
-        return self
diff --git a/parsons/etl/table.py b/parsons/etl/table.py
index b0a0fd42fe..6eb1055e2e 100644
--- a/parsons/etl/table.py
+++ b/parsons/etl/table.py
@@ -82,7 +82,7 @@ def __getitem__(self, index):

         else:

-            raise TypeError("You must pass a string or an index as a value.")
+            raise TypeError('You must pass a string or an index as a value.')

     def __bool__(self):

@@ -108,9 +108,6 @@ def num_rows(self):
         """
         return petl.nrows(self.table)

-    def __len__(self):
-        return self.num_rows
-
     @property
     def data(self):
         """
@@ -156,15 +153,13 @@ def row_data(self, row_index):
             self._index_count += 1
             if self._index_count >= DIRECT_INDEX_WARNING_COUNT:
-                logger.warning(
-                    """
+                logger.warning("""
                 You have indexed directly into this Table multiple times. This can be
                 inefficient, as data transformations you've made will be computed _each time_
                 you index into the Table. If you are accessing many rows of data, consider
                 switching to this style of iteration, which is much more efficient:
                 `for row in table:`
-                    """
-                )
+                    """)

         return petl.dicts(self.table)[row_index]

@@ -184,7 +179,7 @@ def column_data(self, column_name):
             return list(self.table[column_name])

         else:
-            raise ValueError("Column name not found.")
+            raise ValueError('Column name not found.')

     def materialize(self):
         """
@@ -221,7 +216,7 @@ def materialize_to_file(self, file_path=None):

         file_path = file_path or files.create_temp_file()

-        with open(file_path, "wb") as handle:
+        with open(file_path, 'wb') as handle:
             for row in self.table:
                 pickle.dump(list(row), handle)

diff --git a/parsons/etl/tofrom.py b/parsons/etl/tofrom.py
index 4d564659bf..942f3f9ae3 100644
--- a/parsons/etl/tofrom.py
+++ b/parsons/etl/tofrom.py
@@ -6,7 +6,9 @@

 class ToFrom(object):

-    def to_dataframe(self, index=None, exclude=None, columns=None, coerce_float=False):
+
+    def to_dataframe(self, index=None, exclude=None, columns=None,
+                     coerce_float=False):
         """
         Outputs table as a Pandas Dataframe

@@ -27,25 +29,12 @@ def to_dataframe(self, index=None, exclude=None, columns=None, coerce_float=Fals
             Pandas DataFrame object
         """

-        return petl.todataframe(
-            self.table,
-            index=index,
-            exclude=exclude,
-            columns=columns,
-            coerce_float=coerce_float,
-        )
-
-    def to_html(
-        self,
-        local_path=None,
-        encoding=None,
-        errors="strict",
-        index_header=False,
-        caption=None,
-        tr_style=None,
-        td_styles=None,
-        truncate=None,
-    ):
+        return petl.todataframe(self.table, index=index, exclude=exclude,
+                                columns=columns, coerce_float=coerce_float)
+
+    def to_html(self, local_path=None, encoding=None, errors='strict',
+                index_header=False, caption=None, tr_style=None,
+                td_styles=None, truncate=None):
         """
         Outputs table to html.
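The keyword reflow above is purely stylistic; the call patterns are unchanged. A minimal sketch (values hypothetical, and pandas must be installed for to_dataframe):

.. code-block:: python

    from parsons import Table

    tbl = Table([['name', 'points'], ['alice', 3], ['bob', 5]])

    df = tbl.to_dataframe()   # materializes the petl table as a pandas DataFrame
    path = tbl.to_html()      # writes HTML to a temp file and returns its path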
@@ -81,30 +70,20 @@ def to_html( if not local_path: local_path = files.create_temp_file(suffix=".html") - petl.tohtml( - self.table, - source=local_path, - encoding=encoding, - errors=errors, - caption=caption, - index_header=index_header, - tr_style=tr_style, - td_styles=td_styles, - truncate=truncate, - ) + petl.tohtml(self.table, + source=local_path, + encoding=encoding, + errors=errors, + caption=caption, + index_header=index_header, + tr_style=tr_style, + td_styles=td_styles, + truncate=truncate) return local_path - def to_csv( - self, - local_path=None, - temp_file_compression=None, - encoding=None, - errors="strict", - write_header=True, - csv_name=None, - **csvargs, - ): + def to_csv(self, local_path=None, temp_file_compression=None, encoding=None, errors='strict', + write_header=True, csv_name=None, **csvargs): """ Outputs table to a CSV. Additional key word arguments are passed to ``csv.writer()``. So, e.g., to override the delimiter from the default CSV dialect, provide the delimiter @@ -143,32 +122,28 @@ def to_csv( # If a zip archive. if files.zip_check(local_path, temp_file_compression): - return self.to_zip_csv( - archive_path=local_path, - encoding=encoding, - errors=errors, - write_header=write_header, - csv_name=csv_name, - **csvargs, - ) + return self.to_zip_csv(archive_path=local_path, + encoding=encoding, + errors=errors, + write_header=write_header, + csv_name=csv_name, + **csvargs) if not local_path: - suffix = ".csv" + files.suffix_for_compression_type(temp_file_compression) + suffix = '.csv' + files.suffix_for_compression_type(temp_file_compression) local_path = files.create_temp_file(suffix=suffix) # Create normal csv/.gzip - petl.tocsv( - self.table, - source=local_path, - encoding=encoding, - errors=errors, - write_header=write_header, - **csvargs, - ) + petl.tocsv(self.table, + source=local_path, + encoding=encoding, + errors=errors, + write_header=write_header, + **csvargs) return local_path - def append_csv(self, local_path, encoding=None, errors="strict", **csvargs): + def append_csv(self, local_path, encoding=None, errors='strict', **csvargs): """ Appends table to an existing CSV. @@ -193,21 +168,15 @@ def append_csv(self, local_path, encoding=None, errors="strict", **csvargs): The path of the file """ # noqa: W605 - petl.appendcsv( - self.table, source=local_path, encoding=encoding, errors=errors, **csvargs - ) + petl.appendcsv(self.table, + source=local_path, + encoding=encoding, + errors=errors, + **csvargs) return local_path - def to_zip_csv( - self, - archive_path=None, - csv_name=None, - encoding=None, - errors="strict", - write_header=True, - if_exists="replace", - **csvargs, - ): + def to_zip_csv(self, archive_path=None, csv_name=None, encoding=None, + errors='strict', write_header=True, if_exists='replace', **csvargs): """ Outputs table to a CSV in a zip archive. Additional key word arguments are passed to ``csv.writer()``. 
So, e.g., to override the delimiter from the default CSV dialect, @@ -243,24 +212,17 @@ def to_zip_csv( """ # noqa: W605 if not archive_path: - archive_path = files.create_temp_file(suffix=".zip") + archive_path = files.create_temp_file(suffix='.zip') - cf = self.to_csv( - encoding=encoding, errors=errors, write_header=write_header, **csvargs - ) + cf = self.to_csv(encoding=encoding, errors=errors, write_header=write_header, **csvargs) if not csv_name: - csv_name = ( - files.extract_file_name(archive_path, include_suffix=False) + ".csv" - ) + csv_name = files.extract_file_name(archive_path, include_suffix=False) + '.csv' - return zip_archive.create_archive( - archive_path, cf, file_name=csv_name, if_exists=if_exists - ) + return zip_archive.create_archive(archive_path, cf, file_name=csv_name, + if_exists=if_exists) - def to_json( - self, local_path=None, temp_file_compression=None, line_delimited=False - ): + def to_json(self, local_path=None, temp_file_compression=None, line_delimited=False): """ Outputs table to a JSON file @@ -287,7 +249,7 @@ def to_json( """ if not local_path: - suffix = ".json" + files.suffix_for_compression_type(temp_file_compression) + suffix = '.json' + files.suffix_for_compression_type(temp_file_compression) local_path = files.create_temp_file(suffix=suffix) # Note we don't use the much simpler petl.tojson(), since that method reads the whole @@ -295,26 +257,26 @@ def to_json( if files.is_gzip_path(local_path): open_fn = gzip.open - mode = "w+t" + mode = 'w+t' else: open_fn = open - mode = "w" + mode = 'w' with open_fn(local_path, mode) as file: if not line_delimited: - file.write("[") + file.write('[') i = 0 for row in self: if i: if not line_delimited: - file.write(",") - file.write("\n") + file.write(',') + file.write('\n') i += 1 json.dump(row, file) if not line_delimited: - file.write("]") + file.write(']') return local_path @@ -328,20 +290,9 @@ def to_dicts(self): return list(petl.dicts(self.table)) - def to_sftp_csv( - self, - remote_path, - host, - username, - password, - port=22, - encoding=None, - compression=None, - errors="strict", - write_header=True, - rsa_private_key_file=None, - **csvargs, - ): + def to_sftp_csv(self, remote_path, host, username, password, port=22, encoding=None, + compression=None, errors='strict', write_header=True, + rsa_private_key_file=None, **csvargs): """ Writes the table to a CSV file on a remote SFTP server @@ -377,30 +328,14 @@ def to_sftp_csv( compression = files.compression_type_for_path(remote_path) local_path = self.to_csv( - temp_file_compression=compression, - encoding=encoding, - errors=errors, - write_header=write_header, - **csvargs, - ) + temp_file_compression=compression, encoding=encoding, errors=errors, + write_header=write_header, **csvargs) sftp.put_file(local_path, remote_path) - def to_s3_csv( - self, - bucket, - key, - aws_access_key_id=None, - aws_secret_access_key=None, - compression=None, - encoding=None, - errors="strict", - write_header=True, - acl="bucket-owner-full-control", - public_url=False, - public_url_expires=3600, - use_env_token=True, - **csvargs, - ): + def to_s3_csv(self, bucket, key, aws_access_key_id=None, + aws_secret_access_key=None, compression=None, encoding=None, + errors='strict', write_header=True, acl='bucket-owner-full-control', + public_url=False, public_url_expires=3600, **csvargs): """ Writes the table to an s3 object as a CSV @@ -430,10 +365,6 @@ def to_s3_csv( The time, in seconds, until the url expires if ``public_url`` set to ``True``. 
acl: str The S3 permissions on the file - use_env_token: boolean - Controls use of the ``AWS_SESSION_TOKEN`` environment variable for S3. Defaults - to ``True``. Set to ``False`` in order to ignore the ``AWS_SESSION_TOKEN`` env - variable even if the ``aws_session_token`` argument was not passed in. \**csvargs: kwargs ``csv_writer`` optional arguments `Returns:` @@ -442,26 +373,20 @@ def to_s3_csv( compression = compression or files.compression_type_for_path(key) - csv_name = files.extract_file_name(key, include_suffix=False) + ".csv" + csv_name = files.extract_file_name(key, include_suffix=False) + '.csv' # Save the CSV as a temp file - local_path = self.to_csv( - temp_file_compression=compression, - encoding=encoding, - errors=errors, - write_header=write_header, - csv_name=csv_name, - **csvargs, - ) + local_path = self.to_csv(temp_file_compression=compression, + encoding=encoding, + errors=errors, + write_header=write_header, + csv_name=csv_name, + **csvargs) # Put the file on S3 from parsons.aws import S3 - - self.s3 = S3( - aws_access_key_id=aws_access_key_id, - aws_secret_access_key=aws_secret_access_key, - use_env_token=use_env_token, - ) + self.s3 = S3(aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) self.s3.put_file(bucket, key, local_path, acl=acl) if public_url: @@ -469,20 +394,9 @@ def to_s3_csv( else: return None - def to_gcs_csv( - self, - bucket_name, - blob_name, - app_creds=None, - project=None, - compression=None, - encoding=None, - errors="strict", - write_header=True, - public_url=False, - public_url_expires=60, - **csvargs, - ): + def to_gcs_csv(self, bucket_name, blob_name, app_creds=None, project=None, compression=None, + encoding=None, errors='strict', write_header=True, public_url=False, + public_url_expires=60, **csvargs): """ Writes the table to a Google Cloud Storage blob as a CSV. @@ -520,20 +434,17 @@ def to_gcs_csv( compression = compression or files.compression_type_for_path(blob_name) - csv_name = files.extract_file_name(blob_name, include_suffix=False) + ".csv" + csv_name = files.extract_file_name(blob_name, include_suffix=False) + '.csv' # Save the CSV as a temp file - local_path = self.to_csv( - temp_file_compression=compression, - encoding=encoding, - errors=errors, - write_header=write_header, - csv_name=csv_name, - **csvargs, - ) + local_path = self.to_csv(temp_file_compression=compression, + encoding=encoding, + errors=errors, + write_header=write_header, + csv_name=csv_name, + **csvargs) from parsons.google.google_cloud_storage import GoogleCloudStorage - gcs = GoogleCloudStorage(app_creds=app_creds, project=project) gcs.put_blob(bucket_name, blob_name, local_path) @@ -542,16 +453,8 @@ def to_gcs_csv( else: return None - def to_redshift( - self, - table_name, - username=None, - password=None, - host=None, - db=None, - port=None, - **copy_args, - ): + def to_redshift(self, table_name, username=None, password=None, host=None, + db=None, port=None, **copy_args): """ Write a table to a Redshift database. Note, this requires you to pass AWS S3 credentials or store them as environmental variables. 
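A quick sketch of the S3 and Redshift export paths above (bucket, key, and table names are hypothetical; credentials typically come from the standard AWS and REDSHIFT_* env vars):

.. code-block:: python

    from parsons import Table

    tbl = Table([['id', 'name'], [1, 'alice']])

    # Compression is inferred from the key suffix (e.g. .csv.gz); a presigned
    # URL is returned when public_url=True, otherwise None.
    url = tbl.to_s3_csv('my-bucket', 'exports/people.csv.gz',
                        public_url=True, public_url_expires=3600)

    # to_redshift() hands the table to the Redshift connector's copy(), which
    # stages it through S3, so AWS credentials must be available as well.
    tbl.to_redshift('my_schema.people')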
@@ -577,20 +480,11 @@ def to_redshift( """ # noqa: W605 from parsons.databases.redshift import Redshift - rs = Redshift(username=username, password=password, host=host, db=db, port=port) rs.copy(self, table_name, **copy_args) - def to_postgres( - self, - table_name, - username=None, - password=None, - host=None, - db=None, - port=None, - **copy_args, - ): + def to_postgres(self, table_name, username=None, password=None, host=None, + db=None, port=None, **copy_args): """ Write a table to a Postgres database. @@ -615,7 +509,6 @@ def to_postgres( """ # noqa: W605 from parsons.databases.postgres import Postgres - pg = Postgres(username=username, password=password, host=host, db=db, port=port) pg.copy(self, table_name, **copy_args) @@ -623,20 +516,9 @@ def to_petl(self): return self.table - def to_civis( - self, - table, - api_key=None, - db=None, - max_errors=None, - existing_table_rows="fail", - diststyle=None, - distkey=None, - sortkey1=None, - sortkey2=None, - wait=True, - **civisargs, - ): + def to_civis(self, table, api_key=None, db=None, max_errors=None, + existing_table_rows='fail', diststyle=None, distkey=None, + sortkey1=None, sortkey2=None, wait=True, **civisargs): """ Write the table to a Civis Redshift cluster. Additional key word arguments can passed to `civis.io.dataframe_to_civis() @@ -673,20 +555,12 @@ def to_civis( """ from parsons.civis.civisclient import CivisClient - civis = CivisClient(db=db, api_key=api_key) return civis.table_import( - self, - table, - max_errors=max_errors, - existing_table_rows=existing_table_rows, - diststyle=diststyle, - distkey=distkey, - sortkey1=sortkey1, - sortkey2=sortkey2, - wait=wait, - **civisargs, - ) + self, table, max_errors=max_errors, + existing_table_rows=existing_table_rows, diststyle=diststyle, + distkey=distkey, sortkey1=sortkey1, sortkey2=sortkey2, wait=wait, + **civisargs) @classmethod def from_csv(cls, local_path, **csvargs): @@ -711,7 +585,7 @@ def from_csv(cls, local_path, **csvargs): is_remote_file = False if not is_remote_file and not files.has_data(local_path): - raise ValueError("CSV file is empty") + raise ValueError('CSV file is empty') return cls(petl.fromcsv(local_path, **csvargs)) @@ -730,7 +604,7 @@ def from_csv_string(cls, str, **csvargs): See :ref:`parsons-table` for output options. """ - bytesio = io.BytesIO(str.encode("utf-8")) + bytesio = io.BytesIO(str.encode('utf-8')) memory_source = petl.io.sources.MemorySource(bytesio.read()) return cls(petl.fromcsv(memory_source, **csvargs)) @@ -777,7 +651,7 @@ def from_json(cls, local_path, header=None, line_delimited=False): else: open_fn = open - with open_fn(local_path, "r") as file: + with open_fn(local_path, 'r') as file: rows = [json.loads(line) for line in file] return cls(rows) @@ -785,9 +659,8 @@ def from_json(cls, local_path, header=None, line_delimited=False): return cls(petl.fromjson(local_path, header=header)) @classmethod - def from_redshift( - cls, sql, username=None, password=None, host=None, db=None, port=None - ): + def from_redshift(cls, sql, username=None, password=None, host=None, + db=None, port=None): """ Create a ``parsons table`` from a Redshift query. 
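For orientation, the from_* constructors above are classmethods; a small sketch of the common entry points (file names hypothetical):

.. code-block:: python

    from parsons import Table

    tbl = Table.from_csv('people.csv')            # raises ValueError on an empty file
    tbl2 = Table.from_csv_string('a,b\n1,2\n')    # parses an in-memory CSV string
    tbl3 = Table.from_json('events.json', line_delimited=True)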
@@ -813,14 +686,11 @@ def from_redshift( """ from parsons.databases.redshift import Redshift - rs = Redshift(username=username, password=password, host=host, db=db, port=port) return rs.query(sql) @classmethod - def from_postgres( - cls, sql, username=None, password=None, host=None, db=None, port=None - ): + def from_postgres(cls, sql, username=None, password=None, host=None, db=None, port=None): """ Args: sql: str @@ -838,20 +708,12 @@ def from_postgres( """ from parsons.databases.postgres import Postgres - pg = Postgres(username=username, password=password, host=host, db=db, port=port) return pg.query(sql) @classmethod - def from_s3_csv( - cls, - bucket, - key, - from_manifest=False, - aws_access_key_id=None, - aws_secret_access_key=None, - **csvargs, - ): + def from_s3_csv(cls, bucket, key, from_manifest=False, aws_access_key_id=None, + aws_secret_access_key=None, **csvargs): """ Create a ``parsons table`` from a key in an S3 bucket. @@ -874,7 +736,6 @@ def from_s3_csv( """ # noqa: W605 from parsons.aws import S3 - s3 = S3(aws_access_key_id, aws_secret_access_key) if from_manifest: @@ -891,7 +752,7 @@ def from_s3_csv( # TODO handle urls that end with '/', i.e. urls that point to "folders" _, _, bucket_, key_ = key.split("/", 3) file_ = s3.get_file(bucket_, key_) - if files.compression_type_for_path(key_) == "zip": + if files.compression_type_for_path(key_) == 'zip': file_ = zip_archive.unzip_archive(file_) tbls.append(petl.fromcsv(file_, **csvargs)) diff --git a/parsons/facebook_ads/__init__.py b/parsons/facebook_ads/__init__.py index f52d04906d..742bc201bb 100644 --- a/parsons/facebook_ads/__init__.py +++ b/parsons/facebook_ads/__init__.py @@ -1,3 +1,5 @@ from parsons.facebook_ads.facebook_ads import FacebookAds -__all__ = ["FacebookAds"] +__all__ = [ + 'FacebookAds' +] diff --git a/parsons/facebook_ads/facebook_ads.py b/parsons/facebook_ads/facebook_ads.py index 0733cfed8b..6568d91fbd 100644 --- a/parsons/facebook_ads/facebook_ads.py +++ b/parsons/facebook_ads/facebook_ads.py @@ -38,56 +38,52 @@ class FacebookAds(object): # method! # TODO add support for parsing full names from one column KeyMatchMap = { - FBKeySchema.email: ["email", "email address", "voterbase_email"], - FBKeySchema.fn: ["fn", "first", "first name", "vb_tsmart_first_name"], - FBKeySchema.ln: ["ln", "last", "last name", "vb_tsmart_last_name"], + FBKeySchema.email: ['email', 'email address', 'voterbase_email'], + FBKeySchema.fn: ['fn', 'first', 'first name', 'vb_tsmart_first_name'], + FBKeySchema.ln: ['ln', 'last', 'last name', 'vb_tsmart_last_name'], FBKeySchema.phone: [ - "phone", - "phone number", - "cell", - "landline", - "vb_voterbase_phone", - "vb_voterbase_phone_wireless", - ], - FBKeySchema.ct: ["ct", "city", "vb_vf_reg_city", "vb_tsmart_city"], + 'phone', + 'phone number', + 'cell', + 'landline', + 'vb_voterbase_phone', + 'vb_voterbase_phone_wireless' + ], + FBKeySchema.ct: ['ct', 'city', 'vb_vf_reg_city', 'vb_tsmart_city'], FBKeySchema.st: [ - "st", - "state", - "state code", - "vb_vf_source_state", - "vb_tsmart_state", - "vb_vf_reg_state", - "vb_vf_reg_cass_state", - ], - FBKeySchema.zip: ["zip", "zip code", "vb_vf_reg_zip", "vb_tsmart_zip"], - FBKeySchema.country: ["country", "country code"], + 'st', + 'state', + 'state code', + 'vb_vf_source_state', + 'vb_tsmart_state', + 'vb_vf_reg_state', + 'vb_vf_reg_cass_state' + ], + FBKeySchema.zip: ['zip', 'zip code', 'vb_vf_reg_zip', 'vb_tsmart_zip'], + FBKeySchema.country: ['country', 'country code'], # Yes, it's not kosher to confuse gender and sex. 
However, gender is all that FB # supports in their audience targeting. - FBKeySchema.gen: ["gen", "gender", "sex", "vb_voterbase_gender"], - FBKeySchema.doby: ["doby", "dob year", "birth year"], - FBKeySchema.dobm: ["dobm", "dob month", "birth month"], - FBKeySchema.dobd: ["dobd", "dob day", "birth day"], + FBKeySchema.gen: ['gen', 'gender', 'sex', 'vb_voterbase_gender'], + FBKeySchema.doby: ['doby', 'dob year', 'birth year'], + FBKeySchema.dobm: ['dobm', 'dob month', 'birth month'], + FBKeySchema.dobd: ['dobd', 'dob day', 'birth day'], } PreprocessKeyMatchMap = { # Data in this column will be parsed into the FBKeySchema.dobX keys. - "DOB YYYYMMDD": ["dob", "vb_voterbase_dob", "vb_tsmart_dob"] + "DOB YYYYMMDD": ['dob', 'vb_voterbase_dob', 'vb_tsmart_dob'] } - def __init__( - self, app_id=None, app_secret=None, access_token=None, ad_account_id=None - ): + def __init__(self, app_id=None, app_secret=None, access_token=None, ad_account_id=None): try: - self.app_id = app_id or os.environ["FB_APP_ID"] - self.app_secret = app_secret or os.environ["FB_APP_SECRET"] - self.access_token = access_token or os.environ["FB_ACCESS_TOKEN"] - self.ad_account_id = ad_account_id or os.environ["FB_AD_ACCOUNT_ID"] + self.app_id = app_id or os.environ['FB_APP_ID'] + self.app_secret = app_secret or os.environ['FB_APP_SECRET'] + self.access_token = access_token or os.environ['FB_ACCESS_TOKEN'] + self.ad_account_id = ad_account_id or os.environ['FB_AD_ACCOUNT_ID'] except KeyError as error: - logger.error( - "FB Marketing API credentials missing. Must be specified as env vars " - "or kwargs" - ) + logger.error("FB Marketing API credentials missing. Must be specified as env vars " + "or kwargs") raise error FacebookAdsApi.init(self.app_id, self.app_secret, self.access_token) @@ -121,13 +117,16 @@ def _preprocess_dob_column(table, column): # TODO Throw an error if the values are not 6 characters long? table.add_column( - FBKeySchema.doby, lambda row: row[column][:4] if row[column] else None + FBKeySchema.doby, + lambda row: row[column][:4] if row[column] else None ) table.add_column( - FBKeySchema.dobm, lambda row: row[column][4:6] if row[column] else None + FBKeySchema.dobm, + lambda row: row[column][4:6] if row[column] else None ) table.add_column( - FBKeySchema.dobd, lambda row: row[column][6:8] if row[column] else None + FBKeySchema.dobd, + lambda row: row[column][6:8] if row[column] else None ) table.remove_column(column) @@ -188,20 +187,20 @@ def get_match_table_for_users_table(users_table): for fb_key, orig_cols in fb_keys_to_orig_cols.items(): value_fn = ( - lambda bound_cols: lambda row: FacebookAds._get_first_non_empty_value_from_dict( - row, bound_cols - ) + lambda bound_cols: + lambda row: + FacebookAds._get_first_non_empty_value_from_dict(row, bound_cols) )(orig_cols) # A little trickery here to handle the case where one of the "orig_cols" is already # named like the "fb_key". - t.add_column(fb_key + "_fb_temp_col", value_fn) + t.add_column(fb_key+"_fb_temp_col", value_fn) t.remove_column(*orig_cols) - t.rename_column(fb_key + "_fb_temp_col", fb_key) + t.rename_column(fb_key+"_fb_temp_col", fb_key) # Convert None values to empty strings. Otherwise the FB SDK chokes. 
petl_table = t.to_petl() - t = Table(petl_table.replaceall(None, "")) + t = Table(petl_table.replaceall(None, '')) return t @@ -243,14 +242,14 @@ def create_custom_audience(self, name, data_source, description=None): raise KeyError("Invalid data_source provided") params = { - "name": name, - "subtype": "CUSTOM", - "description": description, - "customer_file_source": data_source, + 'name': name, + 'subtype': 'CUSTOM', + 'description': description, + 'customer_file_source': data_source, } res = self.ad_account.create_custom_audience(params=params) - return res["id"] + return res['id'] def delete_custom_audience(self, audience_id): """ @@ -264,16 +263,8 @@ def delete_custom_audience(self, audience_id): CustomAudience(audience_id).api_delete() @staticmethod - def _add_batch_to_custom_audience( - app_id, - app_secret, - access_token, - audience_id, - schema, - batch, - added_so_far, - total_rows, - ): + def _add_batch_to_custom_audience(app_id, app_secret, access_token, audience_id, schema, + batch, added_so_far, total_rows): # Since this method runs in parallel, we need to re-initialize the Facebook API each time # to avoid SSL-related errors. Basically, the FacebookAdsApi python framework isn't # built to run in parallel. @@ -281,9 +272,7 @@ def _add_batch_to_custom_audience( # Note that the FB SDK handles basic normalization and hashing of the data CustomAudience(audience_id).add_users(schema, batch, is_raw=True) - logger.info( - f"Added {added_so_far+len(batch)}/{total_rows} users to custom audience..." - ) + logger.info(f"Added {added_so_far+len(batch)}/{total_rows} users to custom audience...") def add_users_to_custom_audience(self, audience_id, users_table): """ @@ -358,19 +347,15 @@ def add_users_to_custom_audience(self, audience_id, users_table): users_table: obj Parsons table - """ # noqa: E501,E261 + """ # noqa: E501,E261 - logger.info( - f"Adding custom audience users from provided table with " - f"{users_table.num_rows} rows" - ) + logger.info(f"Adding custom audience users from provided table with " + f"{users_table.num_rows} rows") match_table = FacebookAds.get_match_table_for_users_table(users_table) if not match_table.columns: - raise KeyError( - "No valid columns found for audience matching. " - "See FacebookAds.KeyMatchMap for supported columns" - ) + raise KeyError("No valid columns found for audience matching. 
" + "See FacebookAds.KeyMatchMap for supported columns") num_rows = match_table.num_rows logger.info(f"Found {num_rows} rows with valid FB matching keys") @@ -385,17 +370,11 @@ def add_users_to_custom_audience(self, audience_id, users_table): parallel_jobs = ( delayed(FacebookAds._add_batch_to_custom_audience)( - self.app_id, - self.app_secret, - self.access_token, - audience_id, - schema, - data[i : i + batch_size], - i, - num_rows, + self.app_id, self.app_secret, self.access_token, audience_id, schema, + data[i:i+batch_size], i, num_rows ) for i in range(0, len(data), batch_size) ) - n_jobs = os.environ.get("PARSONS_NUM_PARALLEL_JOBS", 4) + n_jobs = os.environ.get('PARSONS_NUM_PARALLEL_JOBS', 4) Parallel(n_jobs=n_jobs)(parallel_jobs) diff --git a/parsons/freshdesk/__init__.py b/parsons/freshdesk/__init__.py index 45302adfd7..70206a627a 100644 --- a/parsons/freshdesk/__init__.py +++ b/parsons/freshdesk/__init__.py @@ -1,3 +1,5 @@ from parsons.freshdesk.freshdesk import Freshdesk -__all__ = ["Freshdesk"] +__all__ = [ + 'Freshdesk' +] diff --git a/parsons/freshdesk/freshdesk.py b/parsons/freshdesk/freshdesk.py index e197742f54..ec177020d6 100644 --- a/parsons/freshdesk/freshdesk.py +++ b/parsons/freshdesk/freshdesk.py @@ -26,26 +26,26 @@ class Freshdesk: def __init__(self, domain, api_key): - self.api_key = check_env.check("FRESHDESK_API_KEY", api_key) - self.domain = check_env.check("FRESHDESK_DOMAIN", domain) - self.uri = f"https://{self.domain}.freshdesk.com/api/v2/" - self.client = APIConnector(self.uri, auth=(self.api_key, "x")) + self.api_key = check_env.check('FRESHDESK_API_KEY', api_key) + self.domain = check_env.check('FRESHDESK_DOMAIN', domain) + self.uri = f'https://{self.domain}.freshdesk.com/api/v2/' + self.client = APIConnector(self.uri, auth=(self.api_key, 'x')) def _get_request(self, endpoint, params=None): - base_params = {"per_page": PAGE_SIZE} + base_params = {'per_page': PAGE_SIZE} if params: base_params.update(params) - r = self.client.request(endpoint, "GET", params=base_params) + r = self.client.request(endpoint, 'GET', params=base_params) self.client.validate_response(r) data = r.json() # Paginate - while "link" in r.headers.keys(): - logger.info(f"Retrieving another page of {PAGE_SIZE} records.") - url = re.search("<(.*)>", r.headers["link"]).group(1) - r = self.client.request(url, "GET", params=params) + while 'link' in r.headers.keys(): + logger.info(f'Retrieving another page of {PAGE_SIZE} records.') + url = re.search('<(.*)>', r.headers['link']).group(1) + r = self.client.request(url, 'GET', params=params) self.client.validate_response(r) data.extend(r.json()) @@ -54,22 +54,15 @@ def _get_request(self, endpoint, params=None): @staticmethod def _transform_table(tbl, expand_custom_fields=None): if tbl.num_rows > 0: - tbl.move_column("id", 0) + tbl.move_column('id', 0) tbl.sort() if expand_custom_fields: - tbl.unpack_dict("custom_fields", prepend=False) + tbl.unpack_dict('custom_fields', prepend=False) return tbl - def get_tickets( - self, - ticket_type=None, - requester_id=None, - requester_email=None, - company_id=None, - updated_since="2016-01-01", - expand_custom_fields=False, - ): + def get_tickets(self, ticket_type=None, requester_id=None, requester_email=None, + company_id=None, updated_since='2016-01-01', expand_custom_fields=False): """ List tickets. @@ -104,28 +97,18 @@ def get_tickets( See :ref:`parsons-table` for output options. 
""" - params = { - "filter": ticket_type, - "requester_id": requester_id, - "requester_email": requester_email, - "company_id": company_id, - "updated_since": updated_since, - } + params = {'filter': ticket_type, + 'requester_id': requester_id, + 'requester_email': requester_email, + 'company_id': company_id, + 'updated_since': updated_since} - tbl = Table(self._get_request("tickets", params=params)) - logger.info(f"Found {tbl.num_rows} tickets.") + tbl = Table(self._get_request('tickets', params=params)) + logger.info(f'Found {tbl.num_rows} tickets.') return self._transform_table(tbl, expand_custom_fields) - def get_contacts( - self, - email=None, - mobile=None, - phone=None, - company_id=None, - state=None, - updated_since=None, - expand_custom_fields=None, - ): + def get_contacts(self, email=None, mobile=None, phone=None, company_id=None, + state=None, updated_since=None, expand_custom_fields=None): """ Get contacts. @@ -152,17 +135,15 @@ def get_contacts( See :ref:`parsons-table` for output options. """ - params = { - "email": email, - "mobile": mobile, - "phone": phone, - "company_id": company_id, - "state": state, - "_updated_since": updated_since, - } - - tbl = Table(self._get_request("contacts", params=params)) - logger.info(f"Found {tbl.num_rows} contacts.") + params = {'email': email, + 'mobile': mobile, + 'phone': phone, + 'company_id': company_id, + 'state': state, + '_updated_since': updated_since} + + tbl = Table(self._get_request('contacts', params=params)) + logger.info(f'Found {tbl.num_rows} contacts.') return self._transform_table(tbl, expand_custom_fields) def get_companies(self, expand_custom_fields=False): @@ -180,8 +161,8 @@ def get_companies(self, expand_custom_fields=False): See :ref:`parsons-table` for output options. """ - tbl = Table(self._get_request("companies")) - logger.info(f"Found {tbl.num_rows} companies.") + tbl = Table(self._get_request('companies')) + logger.info(f'Found {tbl.num_rows} companies.') return self._transform_table(tbl, expand_custom_fields) def get_agents(self, email=None, mobile=None, phone=None, state=None): @@ -205,11 +186,14 @@ def get_agents(self, email=None, mobile=None, phone=None, state=None): See :ref:`parsons-table` for output options. """ - params = {"email": email, "mobile": mobile, "phone": phone, "state": state} - tbl = Table(self._get_request("agents", params=params)) - logger.info(f"Found {tbl.num_rows} agents.") + params = {'email': email, + 'mobile': mobile, + 'phone': phone, + 'state': state} + tbl = Table(self._get_request('agents', params=params)) + logger.info(f'Found {tbl.num_rows} agents.') tbl = self._transform_table(tbl) - tbl = tbl.unpack_dict("contact", prepend=False) - tbl.remove_column("signature") # Removing since raw HTML might cause issues. + tbl = tbl.unpack_dict('contact', prepend=False) + tbl.remove_column('signature') # Removing since raw HTML might cause issues. return tbl diff --git a/parsons/geocode/__init__.py b/parsons/geocode/__init__.py index 914240006c..729962a33d 100644 --- a/parsons/geocode/__init__.py +++ b/parsons/geocode/__init__.py @@ -1,3 +1,5 @@ from parsons.geocode.census_geocoder import CensusGeocoder -__all__ = ["CensusGeocoder"] +__all__ = [ + 'CensusGeocoder' +] diff --git a/parsons/geocode/census_geocoder.py b/parsons/geocode/census_geocoder.py index b8542b4353..7b57848a4a 100644 --- a/parsons/geocode/census_geocoder.py +++ b/parsons/geocode/census_geocoder.py @@ -22,13 +22,13 @@ class CensusGeocoder(object): vintage: str The US Census vintage file to utilize. 
By default the current vintage is used, but other options can be found `here `_. - """ # noqa E501 + """ # noqa E501 - def __init__(self, benchmark="Public_AR_Current", vintage="Current_Current"): + def __init__(self, benchmark='Public_AR_Current', vintage='Current_Current'): self.cg = censusgeocode.CensusGeocode(benchmark=benchmark, vintage=vintage) - def geocode_onelineaddress(self, address, return_type="geographies"): + def geocode_onelineaddress(self, address, return_type='geographies'): """ Geocode a single line address. Does not require parsing of city and zipcode field. Returns geocode as well as other census block data. If the service is unable to geocode the address @@ -48,14 +48,8 @@ def geocode_onelineaddress(self, address, return_type="geographies"): self._log_result(geo) return geo - def geocode_address( - self, - address_line, - city=None, - state=None, - zipcode=None, - return_type="geographies", - ): + def geocode_address(self, address_line, city=None, state=None, zipcode=None, + return_type='geographies'): """ Geocode an address by specifying address fields. Returns the geocode as well as other census block data. @@ -90,12 +84,12 @@ def geocode_address_batch(self, table): :widths: 40 :header-rows: 1 - * - Column Names - * - id (must be unique) - * - street - * - city - * - state - * - zip + * - Column Data + * - Unique ID + * - Street + * - City + * - State + * - Zipcode `Args:` table: Parsons Table @@ -104,24 +98,16 @@ def geocode_address_batch(self, table): A Parsons table """ - logger.info(f"Geocoding {table.num_rows} records.") - if set(table.columns) != {"street", "city", "state", "zip"}: - msg = ( - "Table must ONLY include `['id', 'street', 'city', 'state', 'zip']` as" - + "columns. Tip: try using `table.cut()`" - ) - raise ValueError(msg) - + logger.info(f'Geocoding {table.num_rows} records.') chunked_tables = table.chunk(BATCH_SIZE) batch_count = 1 records_processed = 0 geocoded_tbl = Table([[]]) for tbl in chunked_tables: - geocoded_tbl.concat(Table(petl.fromdicts(self.cg.addressbatch(tbl)))) records_processed += tbl.num_rows - logger.info(f"{records_processed} of {table.num_rows} records processed.") + logger.info(f'{records_processed} of {table.num_rows} records processed.') batch_count += 1 return geocoded_tbl @@ -130,9 +116,9 @@ def _log_result(self, dict): # Internal method to log the result of the geocode if len(dict) == 0: - logger.info("Unable to geocode record.") + logger.info('Unable to geocode record.') else: - logger.info("Record geocoded.") + logger.info('Record geocoded.') def get_coordinates_data(self, latitude, longitude): """ @@ -148,8 +134,8 @@ def get_coordinates_data(self, latitude, longitude): """ geo = self.cg.coordinates(x=longitude, y=latitude) - if len(geo["States"]) == 0: - logger.info("Coordinate not found.") + if len(geo['States']) == 0: + logger.info('Coordinate not found.') else: - logger.info("Coordinate processed.") + logger.info('Coordinate processed.') return geo diff --git a/parsons/github/__init__.py b/parsons/github/__init__.py index fe5df4a822..a432b0498d 100644 --- a/parsons/github/__init__.py +++ b/parsons/github/__init__.py @@ -1,3 +1,5 @@ from parsons.github.github import GitHub -__all__ = ["GitHub"] +__all__ = [ + 'GitHub' +] diff --git a/parsons/github/github.py b/parsons/github/github.py index 95f4ee3315..2c95e40e72 100644 --- a/parsons/github/github.py +++ b/parsons/github/github.py @@ -13,11 +13,9 @@ def _wrap_method(decorator, method): - @wraps(method) def _wrapper(self, *args, **kwargs): bound_method = 
partial(method.__get__(self, type(self))) return decorator(bound_method)(*args, **kwargs) - return _wrapper @@ -32,7 +30,6 @@ def decorate(cls): if callable(cls_method): setattr(cls, method, _wrap_method(decorator, cls_method)) return cls - return decorate @@ -45,7 +42,6 @@ def _wrapped_func(*args, **kwargs): raise ParsonsGitHubError( "Couldn't find the object you referenced, maybe you need to log in?" ) - return _wrapped_func @@ -75,11 +71,9 @@ class GitHub(object): def __init__(self, username=None, password=None, access_token=None): - self.username = check_env.check("GITHUB_USERNAME", username, optional=True) - self.password = check_env.check("GITHUB_PASSWORD", password, optional=True) - self.access_token = check_env.check( - "GITHUB_ACCESS_TOKEN", access_token, optional=True - ) + self.username = check_env.check('GITHUB_USERNAME', username, optional=True) + self.password = check_env.check('GITHUB_PASSWORD', password, optional=True) + self.access_token = check_env.check('GITHUB_ACCESS_TOKEN', access_token, optional=True) if self.username and self.password: self.client = PyGithub(self.username, self.password) @@ -173,7 +167,7 @@ def list_user_repos(self, username, page=None, page_size=100): Table with page of user repos """ - logger.info(f"Listing page {page} of repos for user {username}") + logger.info(f'Listing page {page} of repos for user {username}') return self._as_table( self.client.get_user(username).get_repos(), page=page, page_size=page_size @@ -195,9 +189,7 @@ def list_organization_repos(self, organization_name, page=None, page_size=100): Table with page of organization repos """ - logger.info( - f"Listing page {page} of repos for organization {organization_name}" - ) + logger.info(f'Listing page {page} of repos for organization {organization_name}') return self._as_table( self.client.get_organization(organization_name).get_repos(), @@ -221,20 +213,9 @@ def get_issue(self, repo_name, issue_number): return self.client.get_repo(repo_name).get_issue(number=issue_number).raw_data - def list_repo_issues( - self, - repo_name, - state="open", - assignee=None, - creator=None, - mentioned=None, - labels=[], - sort="created", - direction="desc", - since=None, - page=None, - page_size=100, - ): + def list_repo_issues(self, repo_name, state="open", assignee=None, creator=None, mentioned=None, + labels=[], sort="created", direction="desc", since=None, page=None, + page_size=100): """List issues for a given repo Args: @@ -267,7 +248,7 @@ def list_repo_issues( Table with page of repo issues """ - logger.info(f"Listing page {page} of issues for repo {repo_name}") + logger.info(f'Listing page {page} of issues for repo {repo_name}') kwargs_dict = {"state": state, "sort": sort, "direction": direction} if assignee: @@ -279,7 +260,7 @@ def list_repo_issues( if len(labels) > 0: kwargs_dict["labels"] = ",".join(labels) if since: - kwargs_dict["since"] = f"{since.isoformat()[:19]}Z" + kwargs_dict["since"] = f'{since.isoformat()[:19]}Z' return self._as_table( self.client.get_repo(repo_name).get_issues(**kwargs_dict), @@ -303,16 +284,8 @@ def get_pull_request(self, repo_name, pull_request_number): return self.client.get_repo(repo_name).get_pull(pull_request_number).raw_data - def list_repo_pull_requests( - self, - repo_name, - state="open", - base=None, - sort="created", - direction="desc", - page=None, - page_size=100, - ): + def list_repo_pull_requests(self, repo_name, state="open", base=None, sort="created", + direction="desc", page=None, page_size=100): """Lists pull requests for a given repo Args: 
@@ -337,16 +310,14 @@ def list_repo_pull_requests( Table with page of repo pull requests """ - logger.info(f"Listing page {page} of pull requests for repo {repo_name}") + logger.info(f'Listing page {page} of pull requests for repo {repo_name}') kwargs_dict = {"state": state, "sort": sort, "direction": direction} if base: kwargs_dict["base"] = base self._as_table( - self.client.get_repo(repo_name).get_pulls(**kwargs_dict), - page=page, - page_size=page_size, + self.client.get_repo(repo_name).get_pulls(**kwargs_dict), page=page, page_size=page_size ) def list_repo_contributors(self, repo_name, page=None, page_size=100): @@ -365,12 +336,10 @@ def list_repo_contributors(self, repo_name, page=None, page_size=100): Table with page of repo contributors """ - logger.info(f"Listing page {page} of contributors for repo {repo_name}") + logger.info(f'Listing page {page} of contributors for repo {repo_name}') return self._as_table( - self.client.get_repo(repo_name).get_contributors(), - page=page, - page_size=page_size, + self.client.get_repo(repo_name).get_contributors(), page=page, page_size=page_size ) def download_file(self, repo_name, path, branch=None, local_path=None): @@ -406,38 +375,31 @@ def download_file(self, repo_name, path, branch=None, local_path=None): if branch is None: branch = repo.default_branch - logger.info( - f"Downloading {path} from {repo_name}, branch {branch} to {local_path}" - ) + logger.info(f'Downloading {path} from {repo_name}, branch {branch} to {local_path}') headers = None if self.access_token: headers = { - "Authorization": f"token {self.access_token}", + 'Authorization': f'token {self.access_token}', } - res = requests.get( - f"https://raw.githubusercontent.com/{repo_name}/{branch}/{path}", - headers=headers, - ) + res = requests.get(f'https://raw.githubusercontent.com/{repo_name}/{branch}/{path}', + headers=headers) if res.status_code == 404: raise UnknownObjectException(status=404, data=res.content) elif res.status_code != 200: raise ParsonsGitHubError( - f"Error downloading {path} from repo {repo_name}: {res.content}" - ) + f'Error downloading {path} from repo {repo_name}: {res.content}') - with open(local_path, "wb") as f: + with open(local_path, 'wb') as f: f.write(res.content) - logger.info(f"Downloaded {path} to {local_path}") + logger.info(f'Downloaded {path} to {local_path}') return local_path - def download_table( - self, repo_name, path, branch=None, local_path=None, delimiter="," - ): + def download_table(self, repo_name, path, branch=None, local_path=None, delimiter=','): """Download a CSV file from a repo by path and branch as a Parsons Table. Args: diff --git a/parsons/google/google_admin.py b/parsons/google/google_admin.py deleted file mode 100644 index c1141d4b9d..0000000000 --- a/parsons/google/google_admin.py +++ /dev/null @@ -1,118 +0,0 @@ -from oauth2client.service_account import ServiceAccountCredentials -from parsons.etl.table import Table -from parsons.google.utitities import setup_google_application_credentials -import httplib2 -import json -import os - - -class GoogleAdmin(object): - """ - A connector for Google Admin. - - - `Args:` - app_creds: str - A credentials json string or a path to a json file. Not required if - ``GOOGLE_APPLICATION_CREDENTIALS`` env variable set. 
- sub: str - An email address that this service account will act on behalf of (via domain-wide - delegation) - `Returns:` - GoogleAdmin Class - """ - - def __init__(self, app_creds=None, sub=None): - setup_google_application_credentials(app_creds) - - self.client = ( - ServiceAccountCredentials.from_json_keyfile_name( - os.environ["GOOGLE_APPLICATION_CREDENTIALS"], - ["https://www.googleapis.com/auth/admin.directory.group"], - ) - .create_delegated(sub) - .authorize(httplib2.Http()) - ) - - def _paginate_request(self, endpoint, collection, params=None): - # Build query params - param_arr = [] - param_str = "" - if params: - for key, value in params.items(): - param_arr.append(key + "=" + value) - param_str = "?" + "&".join(param_arr) - - # Make API call - req_url = "https://admin.googleapis.com/admin/directory/v1/" + endpoint - - # Return type from Google Admin is a tuple of length 2. Extract desired result from 2nd item - # in tuple and convert to json - res = json.loads( - self.client.request(req_url + param_str, "GET")[1].decode("utf-8") - ) - - # Paginate - ret = [] - if collection in res: - ret = res[collection] - - while "nextPageToken" in res: - if param_arr[-1][0:10] != "pageToken=": - param_arr.append("pageToken=" + res["nextPageToken"]) - else: - param_arr[-1] = "pageToken=" + res["nextPageToken"] - res = json.loads( - self.client.request(req_url + "?" + "&".join(param_arr), "GET")[ - 1 - ].decode("utf-8") - ) - ret += res[collection] - - return Table(ret) - - def get_aliases(self, group_key, params=None): - """ - Get aliases for a group. `Google Admin API Documentation `_ - - `Args:` - group_key: str - The Google group id - params: dict - A dictionary of fields for the GET request - `Returns:` - Table Class - """ - return self._paginate_request( - "groups/" + group_key + "/aliases", "aliases", params - ) - - def get_all_group_members(self, group_key, params=None): - """ - Get all members in a group. `Google Admin API Documentation `_ - - `Args:` - group_key: str - The Google group id - params: dict - A dictionary of fields for the GET request - `Returns:` - Table Class - """ - return self._paginate_request( - "groups/" + group_key + "/members", "members", params - ) - - def get_all_groups(self, params=None): - """ - Get all groups in a domain or account. `Google Admin API Documentation `_ - `Args:` - params: dict - A dictionary of fields for the GET request. 
- `Returns:` - Table Class - """ - return self._paginate_request("groups", "groups", params) diff --git a/parsons/google/google_bigquery.py b/parsons/google/google_bigquery.py index f10641c466..e4ff0ce320 100644 --- a/parsons/google/google_bigquery.py +++ b/parsons/google/google_bigquery.py @@ -1,15 +1,12 @@ import pickle -from typing import Optional, Union import uuid from google.cloud import bigquery from google.cloud.bigquery import dbapi -from google.cloud.bigquery.job import LoadJobConfig from google.cloud import exceptions import petl from parsons.databases.table import BaseTable -from parsons.databases.database_connector import DatabaseConnector from parsons.etl import Table from parsons.google.utitities import setup_google_application_credentials from parsons.google.google_cloud_storage import GoogleCloudStorage @@ -17,15 +14,14 @@ from parsons.utilities.files import create_temp_file BIGQUERY_TYPE_MAP = { - "str": "STRING", - "float": "FLOAT", - "int": "INTEGER", - "bool": "BOOLEAN", - "datetime.datetime": "DATETIME", - "datetime.date": "DATE", - "datetime.time": "TIME", - "dict": "RECORD", - "NoneType": "STRING", + 'str': 'STRING', + 'float': 'FLOAT', + 'int': 'INTEGER', + 'bool': 'BOOLEAN', + 'datetime.datetime': 'DATETIME', + 'datetime.date': 'DATE', + 'datetime.time': 'TIME', + 'dict': 'RECORD', } # Max number of rows that we query at a time, so we can avoid loading huge @@ -37,29 +33,29 @@ def get_table_ref(client, table_name): # Helper function to build a TableReference for our table parsed = parse_table_name(table_name) - dataset_ref = client.dataset(parsed["dataset"]) - return dataset_ref.table(parsed["table"]) + dataset_ref = client.dataset(parsed['dataset']) + return dataset_ref.table(parsed['table']) def parse_table_name(table_name): # Helper function to parse out the different components of a table ID - parts = table_name.split(".") + parts = table_name.split('.') parts.reverse() parsed = { - "project": None, - "dataset": None, - "table": None, + 'project': None, + 'dataset': None, + 'table': None, } if len(parts) > 0: - parsed["table"] = parts[0] + parsed['table'] = parts[0] if len(parts) > 1: - parsed["dataset"] = parts[1] + parsed['dataset'] = parts[1] if len(parts) > 2: - parsed["project"] = parts[2] + parsed['project'] = parts[2] return parsed -class GoogleBigQuery(DatabaseConnector): +class GoogleBigQuery: """ Class for querying BigQuery table and returning the data as Parsons tables. @@ -101,18 +97,10 @@ def __init__(self, app_creds=None, project=None, location=None): self._dbapi = dbapi - self.dialect = "bigquery" - - def copy( - self, - tbl: Table, - table_name: str, - if_exists: str = "fail", - tmp_gcs_bucket: Optional[str] = None, - gcs_client: Optional[GoogleCloudStorage] = None, - job_config: Optional[LoadJobConfig] = None, - **load_kwargs, - ): + self.dialect = 'bigquery' + + def copy(self, table_obj, table_name, if_exists='fail', + tmp_gcs_bucket=None, gcs_client=None, job_config=None, **load_kwargs): """ Copy a :ref:`parsons-table` into Google BigQuery via Google Cloud Storage. @@ -136,13 +124,11 @@ def copy( Arguments to pass to the underlying load_table_from_uri call on the BigQuery client. 
""" - tmp_gcs_bucket = check_env.check("GCS_TEMP_BUCKET", tmp_gcs_bucket) + tmp_gcs_bucket = check_env.check('GCS_TEMP_BUCKET', tmp_gcs_bucket) - if if_exists not in ["fail", "truncate", "append", "drop"]: - raise ValueError( - f"Unexpected value for if_exists: {if_exists}, must be one of " - '"append", "drop", "truncate", or "fail"' - ) + if if_exists not in ['fail', 'truncate', 'append', 'drop']: + raise ValueError(f'Unexpected value for if_exists: {if_exists}, must be one of ' + '"append", "drop", "truncate", or "fail"') table_exists = self.table_exists(table_name) @@ -150,7 +136,7 @@ def copy( job_config = bigquery.LoadJobConfig() if not job_config.schema: - job_config.schema = self._generate_schema(tbl) + job_config.schema = self._generate_schema(table_obj) if not job_config.create_disposition: job_config.create_disposition = bigquery.CreateDisposition.CREATE_IF_NEEDED @@ -159,27 +145,25 @@ def copy( job_config.write_disposition = bigquery.WriteDisposition.WRITE_EMPTY if table_exists: - if if_exists == "fail": - raise ValueError("Table already exists.") - elif if_exists == "drop": + if if_exists == 'fail': + raise ValueError('Table already exists.') + elif if_exists == 'drop': self.delete_table(table_name) - elif if_exists == "append": + elif if_exists == 'append': job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND - elif if_exists == "truncate": + elif if_exists == 'truncate': job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE gcs_client = gcs_client or GoogleCloudStorage() - temp_blob_name = f"{uuid.uuid4()}.csv" - temp_blob_uri = gcs_client.upload_table(tbl, tmp_gcs_bucket, temp_blob_name) + temp_blob_name = f'{uuid.uuid4()}.csv' + temp_blob_uri = gcs_client.upload_table(table_obj, tmp_gcs_bucket, temp_blob_name) # load CSV from Cloud Storage into BigQuery table_ref = get_table_ref(self.client, table_name) try: load_job = self.client.load_table_from_uri( - temp_blob_uri, - table_ref, - job_config=job_config, - **load_kwargs, + temp_blob_uri, table_ref, + job_config=job_config, **load_kwargs, ) load_job.result() finally: @@ -196,9 +180,7 @@ def delete_table(self, table_name): table_ref = get_table_ref(self.client, table_name) self.client.delete_table(table_ref) - def query( - self, sql: str, parameters: Optional[Union[list, dict]] = None - ) -> Optional[Table]: + def query(self, sql, parameters=None): """ Run a BigQuery query and return the results as a Parsons table. @@ -243,7 +225,7 @@ def query( temp_filename = create_temp_file() wrote_header = False - with open(temp_filename, "wb") as temp_file: + with open(temp_filename, 'wb') as temp_file: # Track whether we got data, since if we don't get any results we need to return None got_results = False while True: @@ -271,7 +253,7 @@ def query( return final_table - def table_exists(self, table_name: str) -> bool: + def table_exists(self, table_name): """ Check whether or not the Google BigQuery table exists in the specified dataset. 
@@ -308,10 +290,10 @@ def _generate_schema(self, tbl): stats = tbl.get_columns_type_stats() fields = [] for stat in stats: - petl_types = stat["type"] - best_type = "str" if "str" in petl_types else petl_types[0] + petl_types = stat['type'] + best_type = 'str' if 'str' in petl_types else petl_types[0] field_type = self._bigquery_type(best_type) - field = bigquery.schema.SchemaField(stat["name"], field_type) + field = bigquery.schema.SchemaField(stat['name'], field_type) fields.append(field) return fields @@ -349,6 +331,4 @@ def truncate(self): job_config.schema = bq_table.schema empty_table = Table([]) - self.db.copy( - empty_table, self.table, if_exists="truncate", job_config=job_config - ) + self.db.copy(empty_table, self.table, if_exists='truncate', job_config=job_config) diff --git a/parsons/google/google_civic.py b/parsons/google/google_civic.py index 33714ba390..96bdbac93f 100644 --- a/parsons/google/google_civic.py +++ b/parsons/google/google_civic.py @@ -2,7 +2,7 @@ import requests from parsons.etl import Table -URI = "https://www.googleapis.com/civicinfo/v2/" +URI = 'https://www.googleapis.com/civicinfo/v2/' class GoogleCivic(object): @@ -17,7 +17,7 @@ class GoogleCivic(object): def __init__(self, api_key=None): - self.api_key = check_env.check("GOOGLE_CIVIC_API_KEY", api_key) + self.api_key = check_env.check('GOOGLE_CIVIC_API_KEY', api_key) self.uri = URI def request(self, url, args=None): @@ -26,7 +26,7 @@ def request(self, url, args=None): if not args: args = {} - args["key"] = self.api_key + args['key'] = self.api_key r = requests.get(url, params=args) @@ -41,17 +41,17 @@ def get_elections(self): See :ref:`parsons-table` for output options. """ - url = self.uri + "elections" + url = self.uri + 'elections' - return Table((self.request(url))["elections"]) + return Table((self.request(url))['elections']) def _get_voter_info(self, election_id, address): # Internal method to call voter info end point. Portions of this are # parsed for other methods. - url = self.uri + "voterinfo" + url = self.uri + 'voterinfo' - args = {"address": address, "electionId": election_id} + args = {'address': address, 'electionId': election_id} return self.request(url, args=args) @@ -72,9 +72,9 @@ def get_polling_location(self, election_id, address): r = self._get_voter_info(election_id, address) - return r["pollingLocations"] + return r['pollingLocations'] - def get_polling_locations(self, election_id, table, address_field="address"): + def get_polling_locations(self, election_id, table, address_field='address'): """ Get polling location information for a table of addresses. 
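A brief sketch of the voter-info flow above (the API key, election id, and address are hypothetical):

.. code-block:: python

    from parsons.google.google_civic import GoogleCivic

    civic = GoogleCivic(api_key='abc123')  # or GOOGLE_CIVIC_API_KEY env var

    elections = civic.get_elections()  # Parsons Table of available elections
    loc = civic.get_polling_location('2000', '100 Main St, Anytown, US')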
@@ -97,22 +97,22 @@ def get_polling_locations(self, election_id, table, address_field="address"): for row in table: loc = self.get_polling_location(election_id, row[address_field]) # Insert original passed address - loc[0]["passed_address"] = row[address_field] + loc[0]['passed_address'] = row[address_field] # Add to list of lists polling_locations.append(loc[0]) # Unpack values tbl = Table(polling_locations) - tbl.unpack_dict("address", prepend_value="polling") - tbl.unpack_list("sources", replace=True) - tbl.unpack_dict("sources_0", prepend_value="source") - tbl.rename_column("polling_line1", "polling_address") + tbl.unpack_dict('address', prepend_value='polling') + tbl.unpack_list('sources', replace=True) + tbl.unpack_dict('sources_0', prepend_value='source') + tbl.rename_column('polling_line1', 'polling_address') # Resort columns - tbl.move_column("pollingHours", len(tbl.columns)) - tbl.move_column("notes", len(tbl.columns)) - tbl.move_column("polling_locationName", 1) - tbl.move_column("polling_address", 2) + tbl.move_column('pollingHours', len(tbl.columns)) + tbl.move_column('notes', len(tbl.columns)) + tbl.move_column('polling_locationName', 1) + tbl.move_column('polling_address', 2) return tbl diff --git a/parsons/google/google_cloud_storage.py b/parsons/google/google_cloud_storage.py index 0b6065ef2b..c2ce0e9738 100644 --- a/parsons/google/google_cloud_storage.py +++ b/parsons/google/google_cloud_storage.py @@ -56,7 +56,7 @@ def list_buckets(self): """ buckets = [b.name for b in self.client.list_buckets()] - logger.info(f"Found {len(buckets)}.") + logger.info(f'Found {len(buckets)}.') return buckets def bucket_exists(self, bucket_name): @@ -71,10 +71,10 @@ def bucket_exists(self, bucket_name): """ if bucket_name in self.list_buckets(): - logger.info(f"{bucket_name} exists.") + logger.info(f'{bucket_name} exists.') return True else: - logger.info(f"{bucket_name} does not exist.") + logger.info(f'{bucket_name} does not exist.') return False def get_bucket(self, bucket_name): @@ -91,9 +91,9 @@ def get_bucket(self, bucket_name): if self.client.lookup_bucket(bucket_name): bucket = self.client.get_bucket(bucket_name) else: - raise google.cloud.exceptions.NotFound("Bucket not found") + raise google.cloud.exceptions.NotFound('Bucket not found') - logger.info(f"Returning {bucket_name} object") + logger.info(f'Returning {bucket_name} object') return bucket def create_bucket(self, bucket_name): @@ -110,7 +110,7 @@ def create_bucket(self, bucket_name): # To Do: Allow user to set all of the bucket parameters self.client.create_bucket(bucket_name) - logger.info(f"Created {bucket_name} bucket.") + logger.info(f'Created {bucket_name} bucket.') def delete_bucket(self, bucket_name, delete_blobs=False): """ @@ -128,7 +128,7 @@ def delete_bucket(self, bucket_name, delete_blobs=False): bucket = self.get_bucket(bucket_name) bucket.delete(force=delete_blobs) - logger.info(f"{bucket_name} bucket deleted.") + logger.info(f'{bucket_name} bucket deleted.') def list_blobs(self, bucket_name, max_results=None, prefix=None): """ @@ -145,11 +145,9 @@ def list_blobs(self, bucket_name, max_results=None, prefix=None): A list of blob names """ - blobs = self.client.list_blobs( - bucket_name, max_results=max_results, prefix=prefix - ) + blobs = self.client.list_blobs(bucket_name, max_results=max_results, prefix=prefix) lst = [b.name for b in blobs] - logger.info(f"Found {len(lst)} in {bucket_name} bucket.") + logger.info(f'Found {len(lst)} in {bucket_name} bucket.') return lst @@ -167,10 +165,10 @@ def 
blob_exists(self, bucket_name, blob_name): """ if blob_name in self.list_blobs(bucket_name): - logger.info(f"{blob_name} exists.") + logger.info(f'{blob_name} exists.') return True else: - logger.info(f"{blob_name} does not exist.") + logger.info(f'{blob_name} does not exist.') return False def get_blob(self, bucket_name, blob_name): @@ -188,7 +186,7 @@ def get_blob(self, bucket_name, blob_name): bucket = self.get_bucket(bucket_name) blob = bucket.get_blob(blob_name) - logger.debug(f"Got {blob_name} object from {bucket_name} bucket.") + logger.debug(f'Got {blob_name} object from {bucket_name} bucket.') return blob def put_blob(self, bucket_name, blob_name, local_path): @@ -212,7 +210,7 @@ def put_blob(self, bucket_name, blob_name, local_path): with open(local_path, "rb") as f: blob.upload_from_file(f) - logger.info(f"{blob_name} put in {bucket_name} bucket.") + logger.info(f'{blob_name} put in {bucket_name} bucket.') def download_blob(self, bucket_name, blob_name, local_path=None): """ @@ -233,15 +231,15 @@ def download_blob(self, bucket_name, blob_name, local_path=None): """ if not local_path: - local_path = files.create_temp_file_for_path("TEMPTHING") + local_path = files.create_temp_file_for_path('TEMPTHING') bucket = storage.Bucket(self.client, name=bucket_name) blob = storage.Blob(blob_name, bucket) - logger.info(f"Downloading {blob_name} from {bucket_name} bucket.") - with open(local_path, "wb") as f: + logger.info(f'Downloading {blob_name} from {bucket_name} bucket.') + with open(local_path, 'wb') as f: blob.download_to_file(f, client=self.client) - logger.info(f"{blob_name} saved to {local_path}.") + logger.info(f'{blob_name} saved to {local_path}.') return local_path @@ -260,11 +258,9 @@ def delete_blob(self, bucket_name, blob_name): blob = self.get_blob(bucket_name, blob_name) blob.delete() - logger.info(f"{blob_name} blob in {bucket_name} bucket deleted.") + logger.info(f'{blob_name} blob in {bucket_name} bucket deleted.') - def upload_table( - self, table, bucket_name, blob_name, data_type="csv", default_acl=None - ): + def upload_table(self, table, bucket_name, blob_name, data_type='csv', default_acl=None): """ Load the data from a Parsons table into a blob. 
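A usage sketch for upload_table() as defined above (bucket and blob names are hypothetical; credentials come from GOOGLE_APPLICATION_CREDENTIALS):

.. code-block:: python

    from parsons import Table
    from parsons.google.google_cloud_storage import GoogleCloudStorage

    gcs = GoogleCloudStorage()

    tbl = Table([['id'], [1], [2]])

    # data_type may be 'csv' or 'json'; the temp file is cleaned up and the
    # new blob's gs:// URI is returned.
    uri = gcs.upload_table(tbl, 'my-bucket', 'exports/ids.csv')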
@@ -281,28 +277,22 @@ def upload_table( bucket = storage.Bucket(self.client, name=bucket_name) blob = storage.Blob(blob_name, bucket) - if data_type == "csv": + if data_type == 'csv': local_file = table.to_csv() - content_type = "text/csv" - elif data_type == "json": + content_type = 'text/csv' + elif data_type == 'json': local_file = table.to_json() - content_type = "application/json" + content_type = 'application/json' else: - raise ValueError( - f"Unknown data_type value ({data_type}): must be one of: csv or json" - ) + raise ValueError(f'Unknown data_type value ({data_type}): must be one of: csv or json') try: - blob.upload_from_filename( - local_file, - content_type=content_type, - client=self.client, - predefined_acl=default_acl, - ) + blob.upload_from_filename(local_file, content_type=content_type, client=self.client, + predefined_acl=default_acl) finally: files.close_temp_file(local_file) - return f"gs://{bucket_name}/{blob_name}" + return f'gs://{bucket_name}/{blob_name}' def get_url(self, bucket_name, blob_name, expires_in=60): """ @@ -322,9 +312,7 @@ def get_url(self, bucket_name, blob_name, expires_in=60): bucket = self.client.bucket(bucket_name) blob = bucket.blob(blob_name) - url = blob.generate_signed_url( - version="v4", - expiration=datetime.timedelta(minutes=expires_in), - method="GET", - ) + url = blob.generate_signed_url(version="v4", + expiration=datetime.timedelta(minutes=expires_in), + method="GET") return url diff --git a/parsons/google/google_sheets.py b/parsons/google/google_sheets.py index 5890763a01..73fc4d09d2 100644 --- a/parsons/google/google_sheets.py +++ b/parsons/google/google_sheets.py @@ -6,7 +6,7 @@ from parsons.google.utitities import setup_google_application_credentials import gspread -from google.oauth2.service_account import Credentials +from oauth2client.service_account import ServiceAccountCredentials logger = logging.getLogger(__name__) @@ -20,28 +20,22 @@ class GoogleSheets: A dictionary of Google Drive API credentials, parsed from JSON provided by the Google Developer Console. Required if env variable ``GOOGLE_DRIVE_CREDENTIALS`` is not populated. - subject: string - In order to use account impersonation, pass in the email address of the account to be - impersonated as a string. 
""" - def __init__(self, google_keyfile_dict=None, subject=None): + def __init__(self, google_keyfile_dict=None): scope = [ - "https://spreadsheets.google.com/feeds", - "https://www.googleapis.com/auth/drive", - ] + 'https://spreadsheets.google.com/feeds', + 'https://www.googleapis.com/auth/drive', + ] - setup_google_application_credentials( - google_keyfile_dict, "GOOGLE_DRIVE_CREDENTIALS" - ) - google_credential_file = open(os.environ["GOOGLE_DRIVE_CREDENTIALS"]) + setup_google_application_credentials(google_keyfile_dict, 'GOOGLE_DRIVE_CREDENTIALS') + google_credential_file = open(os.environ['GOOGLE_DRIVE_CREDENTIALS']) credentials_dict = json.load(google_credential_file) - credentials = Credentials.from_service_account_info( - credentials_dict, scopes=scope, subject=subject + credentials = ServiceAccountCredentials.from_json_keyfile_dict( + credentials_dict, scope ) - self.gspread_client = gspread.authorize(credentials) def _get_worksheet(self, spreadsheet_id, worksheet=0): @@ -49,16 +43,12 @@ def _get_worksheet(self, spreadsheet_id, worksheet=0): # Check if the worksheet is an integer, if so find the sheet by index if isinstance(worksheet, int): - return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet( - worksheet - ) + return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(worksheet) elif isinstance(worksheet, str): idx = self.list_worksheets(spreadsheet_id).index(worksheet) try: - return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet( - idx - ) + return self.gspread_client.open_by_key(spreadsheet_id).get_worksheet(idx) except: # noqa: E722 raise ValueError(f"Couldn't find worksheet {worksheet}") @@ -117,19 +107,11 @@ def get_worksheet(self, spreadsheet_id, worksheet=0): worksheet = self._get_worksheet(spreadsheet_id, worksheet) tbl = Table(worksheet.get_all_values()) - logger.info(f"Retrieved worksheet with {tbl.num_rows} rows.") + logger.info(f'Retrieved worksheet with {tbl.num_rows} rows.') return tbl - def share_spreadsheet( - self, - spreadsheet_id, - sharee, - share_type="user", - role="reader", - notify=True, - notify_message=None, - with_link=False, - ): + def share_spreadsheet(self, spreadsheet_id, sharee, share_type='user', role='reader', + notify=True, notify_message=None, with_link=False): """ Share a spreadsheet with a user, group of users, domain and/or the public. @@ -154,15 +136,9 @@ def share_spreadsheet( """ spreadsheet = self.gspread_client.open_by_key(spreadsheet_id) - spreadsheet.share( - sharee, - share_type, - role, - notify=notify, - email_message=notify_message, - with_link=with_link, - ) - logger.info(f"Shared spreadsheet {spreadsheet_id}.") + spreadsheet.share(sharee, share_type, role, notify=notify, + email_message=notify_message, with_link=with_link) + logger.info(f'Shared spreadsheet {spreadsheet_id}.') def get_spreadsheet_permissions(self, spreadsheet_id): """ @@ -178,20 +154,19 @@ def get_spreadsheet_permissions(self, spreadsheet_id): spreadsheet = self.gspread_client.open_by_key(spreadsheet_id) tbl = Table(spreadsheet.list_permissions()) - logger.info(f"Retrieved permissions for {spreadsheet_id} spreadsheet.") + logger.info(f'Retrieved permissions for {spreadsheet_id} spreadsheet.') return tbl def create_spreadsheet(self, title, editor_email=None, folder_id=None): """ - Creates a new Google spreadsheet. Optionally shares the new doc with + Create a Google spreadsheet from a Parsons table. Optionally shares the new doc with the given email address. Optionally creates the sheet in a specified folder. 
`Args:` title: str The human-readable title of the new spreadsheet editor_email: str (optional) - Email address which should be given permissions on this spreadsheet. - Tip: You may want to share this file with the service account. + Email address which should be given permissions on this spreadsheet folder_id: str (optional) ID of the Google folder where the spreadsheet should be created. Tip: Get this from the folder URL. @@ -208,11 +183,11 @@ def create_spreadsheet(self, title, editor_email=None, folder_id=None): self.gspread_client.insert_permission( spreadsheet.id, editor_email, - perm_type="user", - role="writer", + perm_type='user', + role='writer', ) - logger.info(f"Created spreadsheet {spreadsheet.id}") + logger.info(f'Created spreadsheet {spreadsheet.id}') return spreadsheet.id def delete_spreadsheet(self, spreadsheet_id): @@ -224,7 +199,7 @@ def delete_spreadsheet(self, spreadsheet_id): The ID of the spreadsheet (Tip: Get this from the spreadsheet URL) """ self.gspread_client.del_spreadsheet(spreadsheet_id) - logger.info(f"Deleted spreadsheet {spreadsheet_id}") + logger.info(f'Deleted spreadsheet {spreadsheet_id}') def add_sheet(self, spreadsheet_id, title=None, rows=100, cols=25): """ @@ -245,12 +220,11 @@ def add_sheet(self, spreadsheet_id, title=None, rows=100, cols=25): spreadsheet = self.gspread_client.open_by_key(spreadsheet_id) spreadsheet.add_worksheet(title, rows, cols) sheet_count = len(spreadsheet.worksheets()) - logger.info("Created worksheet.") - return sheet_count - 1 + logger.info('Created worksheet.') + return (sheet_count-1) - def append_to_sheet( - self, spreadsheet_id, table, worksheet=0, user_entered_value=False, **kwargs - ): + def append_to_sheet(self, spreadsheet_id, table, worksheet=0, user_entered_value=False, + **kwargs): """ Append data from a Parsons table to a Google sheet. Note that the table's columns are ignored, as we'll be keeping whatever header row already exists in the Google sheet. @@ -269,9 +243,9 @@ def append_to_sheet( """ # This is in here to ensure backwards compatibility with previous versions of Parsons. - if "sheet_index" in kwargs: - worksheet = kwargs["sheet_index"] - logger.warning("Argument deprecated. Use worksheet instead.") + if 'sheet_index' in kwargs: + worksheet = kwargs['sheet_index'] + logger.warning('Argument deprecated. Use worksheet instead.') sheet = self._get_worksheet(spreadsheet_id, worksheet) @@ -282,9 +256,7 @@ def append_to_sheet( # If the existing sheet is blank, then just overwrite the table. 
if existing_table.num_rows == 0: - return self.overwrite_sheet( - spreadsheet_id, table, worksheet, user_entered_value - ) + return self.overwrite_sheet(spreadsheet_id, table, worksheet, user_entered_value) cells = [] for row_num, row in enumerate(table.data): @@ -293,17 +265,16 @@ def append_to_sheet( sheet_row_num = existing_table.num_rows + row_num + 2 cells.append(gspread.Cell(sheet_row_num, col_num + 1, row[col_num])) - value_input_option = "RAW" + value_input_option = 'RAW' if user_entered_value: - value_input_option = "USER_ENTERED" + value_input_option = 'USER_ENTERED' # Update the data in one batch sheet.update_cells(cells, value_input_option=value_input_option) - logger.info(f"Appended {table.num_rows} rows to worksheet.") + logger.info(f'Appended {table.num_rows} rows to worksheet.') - def overwrite_sheet( - self, spreadsheet_id, table, worksheet=0, user_entered_value=False, **kwargs - ): + def overwrite_sheet(self, spreadsheet_id, table, worksheet=0, user_entered_value=False, + **kwargs): """ Replace the data in a Google sheet with a Parsons table, using the table's columns as the first row. @@ -322,16 +293,16 @@ def overwrite_sheet( """ # This is in here to ensure backwards compatibility with previous versions of Parsons. - if "sheet_index" in kwargs: - worksheet = kwargs["sheet_index"] - logger.warning("Argument deprecated. Use worksheet instead.") + if 'sheet_index' in kwargs: + worksheet = kwargs['sheet_index'] + logger.warning('Argument deprecated. Use worksheet instead.') sheet = self._get_worksheet(spreadsheet_id, worksheet) sheet.clear() - value_input_option = "RAW" + value_input_option = 'RAW' if user_entered_value: - value_input_option = "USER_ENTERED" + value_input_option = 'USER_ENTERED' # Add header row sheet.append_row(table.columns, value_input_option=value_input_option) @@ -344,7 +315,7 @@ def overwrite_sheet( # Update the data in one batch sheet.update_cells(cells, value_input_option=value_input_option) - logger.info("Overwrote worksheet.") + logger.info('Overwrote worksheet.') def format_cells(self, spreadsheet_id, range, cell_format, worksheet=0): """ @@ -389,26 +360,26 @@ def format_cells(self, spreadsheet_id, range, cell_format, worksheet=0): } }, worksheet=0) - """ # noqa: E501,E261 + """ # noqa: E501,E261 ws = self._get_worksheet(spreadsheet_id, worksheet) ws.format(range, cell_format) - logger.info("Formatted worksheet") + logger.info('Formatted worksheet') def read_sheet(self, spreadsheet_id, sheet_index=0): # Deprecated method v0.14 of Parsons. - logger.warning("Deprecated method. Use get_worksheet() instead.") + logger.warning('Deprecated method. Use get_worksheet() instead.') return self.get_worksheet(spreadsheet_id, sheet_index) def read_sheet_with_title(self, spreadsheet_id, title): # Deprecated method v0.14 of Parsons. - logger.warning("Deprecated method. Use get_worksheet() instead.") + logger.warning('Deprecated method. Use get_worksheet() instead.') return self.get_worksheet(spreadsheet_id, title) def get_sheet_index_with_title(self, spreadsheet_id, title): # Deprecated method v0.14 of Parsons. - logger.warning("Deprecated method. Use get_worksheet_index instead.") + logger.warning('Deprecated method. 
Use get_worksheet_index instead.') return self.get_worksheet_index(spreadsheet_id, title) diff --git a/parsons/google/utitities.py b/parsons/google/utitities.py index abda0bf212..d104d9e56e 100644 --- a/parsons/google/utitities.py +++ b/parsons/google/utitities.py @@ -1,23 +1,17 @@ -import typing as t from parsons.utilities import files from parsons.utilities import check_env import json import os -def setup_google_application_credentials( - app_creds: t.Union[t.Dict, str, None], - env_var_name: str = "GOOGLE_APPLICATION_CREDENTIALS", -) -> None: - # Detect if app_creds is a dict, path string or json string, and if it is a +def setup_google_application_credentials(app_creds, env_var_name='GOOGLE_APPLICATION_CREDENTIALS'): + # Detect if the app_creds string is a path or json and if it is a # json string, then convert it to a temporary file. Then set the # environmental variable. credentials = check_env.check(env_var_name, app_creds) try: - if type(credentials) is dict: - credentials = json.dumps(credentials) - if json.loads(credentials): - creds_path = files.string_to_temp_file(credentials, suffix=".json") + json.loads(credentials) + creds_path = files.string_to_temp_file(credentials, suffix='.json') except ValueError: creds_path = credentials diff --git a/parsons/hustle/__init__.py b/parsons/hustle/__init__.py index eb94b534e9..2e441507ef 100644 --- a/parsons/hustle/__init__.py +++ b/parsons/hustle/__init__.py @@ -1,3 +1,5 @@ from parsons.hustle.hustle import Hustle -__all__ = ["Hustle"] +__all__ = [ + 'Hustle' +] diff --git a/parsons/hustle/column_map.py b/parsons/hustle/column_map.py index 75f1489009..a9a81b3e0e 100644 --- a/parsons/hustle/column_map.py +++ b/parsons/hustle/column_map.py @@ -1,9 +1,8 @@ # Dictionaries used to map column names to valid argument names -LEAD_COLUMN_MAP = { - "first_name": ["first", "fn"], - "last_name": ["last", "ln", "lastname"], - "phone_number": ["phone", "cell", "cellphone"], - "email": ["emailaddress"], - "follow_up": ["follow"], -} +LEAD_COLUMN_MAP = {'first_name': ['first', 'fn'], + 'last_name': ['last', 'ln', 'lastname'], + 'phone_number': ['phone', 'cell', 'cellphone'], + 'email': ['emailaddress'], + 'follow_up': ['follow'] + } diff --git a/parsons/hustle/hustle.py b/parsons/hustle/hustle.py index b1153a5e86..44328bc1ab 100644 --- a/parsons/hustle/hustle.py +++ b/parsons/hustle/hustle.py @@ -7,7 +7,7 @@ logger = logging.getLogger(__name__) -HUSTLE_URI = "https://api.hustle.com/v1/" +HUSTLE_URI = 'https://api.hustle.com/v1/' PAGE_LIMIT = 1000 @@ -29,27 +29,23 @@ class Hustle(object): def __init__(self, client_id, client_secret): self.uri = HUSTLE_URI - self.client_id = check_env.check("HUSTLE_CLIENT_ID", client_id) - self.client_secret = check_env.check("HUSTLE_CLIENT_SECRET", client_secret) + self.client_id = check_env.check('HUSTLE_CLIENT_ID', client_id) + self.client_secret = check_env.check('HUSTLE_CLIENT_SECRET', client_secret) self.token_expiration = None self._get_auth_token(client_id, client_secret) def _get_auth_token(self, client_id, client_secret): # Generate a temporary authorization token - data = { - "client_id": client_id, - "client_secret": client_secret, - "grant_type": "client_credentials", - } + data = {'client_id': client_id, + 'client_secret': client_secret, + 'grant_type': 'client_credentials'} - r = request("POST", self.uri + "oauth/token", data=data) + r = request('POST', self.uri + 'oauth/token', data=data) logger.debug(r.json()) - self.auth_token = r.json()["access_token"] - self.token_expiration = datetime.datetime.now() + 
datetime.timedelta( - seconds=7200 - ) + self.auth_token = r.json()['access_token'] + self.token_expiration = datetime.datetime.now() + datetime.timedelta(seconds=7200) logger.info("Authentication token generated") def _token_check(self): @@ -66,18 +62,16 @@ def _token_check(self): pass - def _request( - self, endpoint, req_type="GET", args=None, payload=None, raise_on_error=True - ): + def _request(self, endpoint, req_type='GET', args=None, payload=None, raise_on_error=True): url = self.uri + endpoint self._token_check() - headers = {"Authorization": f"Bearer {self.auth_token}"} + headers = {'Authorization': f'Bearer {self.auth_token}'} parameters = {} - if req_type == "GET": - parameters = {"limit": PAGE_LIMIT} + if req_type == 'GET': + parameters = {'limit': PAGE_LIMIT} if args: parameters.update(args) @@ -87,20 +81,20 @@ def _request( self._error_check(r, raise_on_error) # If a single item return the dict - if "items" not in r.json().keys(): + if 'items' not in r.json().keys(): return r.json() else: - result = r.json()["items"] + result = r.json()['items'] # Pagination - while r.json()["pagination"]["hasNextPage"] == "true": + while r.json()['pagination']['hasNextPage'] == 'true': - parameters["cursor"] = r.json["pagination"]["cursor"] + parameters['cursor'] = r.json['pagination']['cursor'] r = request(req_type, url, params=parameters, headers=headers) self._error_check(r, raise_on_error) - result.append(r.json()["items"]) + result.append(r.json()['items']) return result @@ -136,8 +130,8 @@ def get_agents(self, group_id): See :ref:`parsons-table` for output options. """ - tbl = Table(self._request(f"groups/{group_id}/agents")) - logger.info(f"Got {tbl.num_rows} agents from {group_id} group.") + tbl = Table(self._request(f'groups/{group_id}/agents')) + logger.info(f'Got {tbl.num_rows} agents from {group_id} group.') return tbl def get_agent(self, agent_id): @@ -151,13 +145,11 @@ def get_agent(self, agent_id): dict """ - r = self._request(f"agents/{agent_id}") - logger.info(f"Got {agent_id} agent.") + r = self._request(f'agents/{agent_id}') + logger.info(f'Got {agent_id} agent.') return r - def create_agent( - self, group_id, name, full_name, phone_number, send_invite=False, email=None - ): + def create_agent(self, group_id, name, full_name, phone_number, send_invite=False, email=None): """ Create an agent. 
@@ -178,21 +170,17 @@ def create_agent( dict """ - agent = { - "name": name, - "fullName": full_name, - "phoneNumber": phone_number, - "sendInvite": send_invite, - "email": email, - } + agent = {'name': name, + 'fullName': full_name, + 'phoneNumber': phone_number, + 'sendInvite': send_invite, + 'email': email} # Remove empty args in dictionary agent = json_format.remove_empty_keys(agent) - logger.info(f"Generating {full_name} agent.") - return self._request( - f"groups/{group_id}/agents", req_type="POST", payload=agent - ) + logger.info(f'Generating {full_name} agent.') + return self._request(f'groups/{group_id}/agents', req_type="POST", payload=agent) def update_agent(self, agent_id, name=None, full_name=None, send_invite=False): """ @@ -213,13 +201,15 @@ def update_agent(self, agent_id, name=None, full_name=None, send_invite=False): dict """ - agent = {"name": name, "fullName": full_name, "sendInvite": send_invite} + agent = {'name': name, + 'fullName': full_name, + 'sendInvite': send_invite} # Remove empty args in dictionary agent = json_format.remove_empty_keys(agent) - logger.info(f"Updating agent {agent_id}.") - return self._request(f"agents/{agent_id}", req_type="PUT", payload=agent) + logger.info(f'Updating agent {agent_id}.') + return self._request(f'agents/{agent_id}', req_type="PUT", payload=agent) def get_organizations(self): """ @@ -230,8 +220,8 @@ def get_organizations(self): See :ref:`parsons-table` for output options. """ - tbl = Table(self._request("organizations")) - logger.info(f"Got {tbl.num_rows} organizations.") + tbl = Table(self._request('organizations')) + logger.info(f'Got {tbl.num_rows} organizations.') return tbl def get_organization(self, organization_id): @@ -245,8 +235,8 @@ def get_organization(self, organization_id): dict """ - r = self._request(f"organizations/{organization_id}") - logger.info(f"Got {organization_id} organization.") + r = self._request(f'organizations/{organization_id}') + logger.info(f'Got {organization_id} organization.') return r def get_groups(self, organization_id): @@ -260,8 +250,8 @@ def get_groups(self, organization_id): See :ref:`parsons-table` for output options. """ - tbl = Table(self._request(f"organizations/{organization_id}/groups")) - logger.info(f"Got {tbl.num_rows} groups.") + tbl = Table(self._request(f'organizations/{organization_id}/groups')) + logger.info(f'Got {tbl.num_rows} groups.') return tbl def get_group(self, group_id): @@ -273,8 +263,8 @@ def get_group(self, group_id): The group id. """ - r = self._request(f"groups/{group_id}") - logger.info(f"Got {group_id} group.") + r = self._request(f'groups/{group_id}') + logger.info(f'Got {group_id} group.') return r def get_lead(self, lead_id): @@ -288,8 +278,8 @@ def get_lead(self, lead_id): dict """ - r = self._request(f"leads/{lead_id}") - logger.info(f"Got {lead_id} lead.") + r = self._request(f'leads/{lead_id}') + logger.info(f'Got {lead_id} lead.') return r def get_leads(self, organization_id=None, group_id=None): @@ -308,36 +298,24 @@ def get_leads(self, organization_id=None, group_id=None): """ if organization_id is None and group_id is None: - raise ValueError("Either organization_id or group_id required.") + raise ValueError('Either organization_id or group_id required.') if organization_id is not None and group_id is not None: - raise ValueError( - "Only one of organization_id and group_id may be populated." 
- ) + raise ValueError('Only one of organization_id and group_id may be populated.') if organization_id: - endpoint = f"organizations/{organization_id}/leads" - logger.info(f"Retrieving {organization_id} organization leads.") + endpoint = f'organizations/{organization_id}/leads' + logger.info(f'Retrieving {organization_id} organization leads.') if group_id: - endpoint = f"groups/{group_id}/leads" - logger.info(f"Retrieving {group_id} group leads.") + endpoint = f'groups/{group_id}/leads' + logger.info(f'Retrieving {group_id} group leads.') tbl = Table(self._request(endpoint)) - logger.info(f"Got {tbl.num_rows} leads.") + logger.info(f'Got {tbl.num_rows} leads.') return tbl - def create_lead( - self, - group_id, - phone_number, - first_name, - last_name=None, - email=None, - notes=None, - follow_up=None, - custom_fields=None, - tag_ids=None, - ): + def create_lead(self, group_id, phone_number, first_name, last_name=None, email=None, + notes=None, follow_up=None, custom_fields=None, tag_ids=None): """ Create a lead. @@ -366,21 +344,20 @@ def create_lead( ``None`` """ - lead = { - "firstName": first_name, - "lastName": last_name, - "email": email, - "phoneNumber": phone_number, - "notes": notes, - "followUp": follow_up, - "customFields": custom_fields, - "tagIds": tag_ids, - } + lead = {'firstName': first_name, + 'lastName': last_name, + 'email': email, + 'phoneNumber': phone_number, + 'notes': notes, + 'followUp': follow_up, + 'customFields': custom_fields, + 'tagIds': tag_ids + } # Remove empty args in dictionary lead = json_format.remove_empty_keys(lead) - logger.info(f"Generating lead for {first_name} {last_name}.") - return self._request(f"groups/{group_id}/leads", req_type="POST", payload=lead) + logger.info(f'Generating lead for {first_name} {last_name}.') + return self._request(f'groups/{group_id}/leads', req_type="POST", payload=lead) def create_leads(self, table, group_id=None): """ @@ -417,21 +394,14 @@ def create_leads(self, table, group_id=None): table.map_columns(LEAD_COLUMN_MAP) - arg_list = [ - "first_name", - "last_name", - "email", - "phone_number", - "follow_up", - "tag_ids", - "group_id", - ] + arg_list = ['first_name', 'last_name', 'email', 'phone_number', 'follow_up', + 'tag_ids', 'group_id'] created_leads = [] for row in table: - lead = {"group_id": group_id} + lead = {'group_id': group_id} custom_fields = {} # Check for column names that map to arguments, if not assign @@ -442,32 +412,21 @@ def create_leads(self, table, group_id=None): else: custom_fields[k] = v - lead["custom_fields"] = custom_fields + lead['custom_fields'] = custom_fields # Group Id check - if not group_id and "group_id" not in table.columns: - raise ValueError( - "Group Id must be passed as an argument or a column value." - ) + if not group_id and 'group_id' not in table.columns: + raise ValueError('Group Id must be passed as an argument or a column value.') if group_id: - lead["group_id"] = group_id + lead['group_id'] = group_id created_leads.append(self.create_lead(**lead)) logger.info(f"Created {table.num_rows} leads.") return Table(created_leads) - def update_lead( - self, - lead_id, - first_name=None, - last_name=None, - email=None, - global_opt_out=None, - notes=None, - follow_up=None, - tag_ids=None, - ): + def update_lead(self, lead_id, first_name=None, last_name=None, email=None, + global_opt_out=None, notes=None, follow_up=None, tag_ids=None): """ Update a lead. 
@@ -492,22 +451,20 @@ def update_lead( dict """ - lead = { - "leadId": lead_id, - "firstName": first_name, - "lastName": last_name, - "email": email, - "globalOptedOut": global_opt_out, - "notes": notes, - "followUp": follow_up, - "tagIds": tag_ids, - } + lead = {'leadId': lead_id, + 'firstName': first_name, + 'lastName': last_name, + 'email': email, + 'globalOptedOut': global_opt_out, + 'notes': notes, + 'followUp': follow_up, + 'tagIds': tag_ids} # Remove empty args in dictionary lead = json_format.remove_empty_keys(lead) - logger.info(f"Updating lead for {first_name} {last_name}.") - return self._request(f"leads/{lead_id}", req_type="PUT", payload=lead) + logger.info(f'Updating lead for {first_name} {last_name}.') + return self._request(f'leads/{lead_id}', req_type="PUT", payload=lead) def get_tags(self, organization_id): """ @@ -521,8 +478,8 @@ def get_tags(self, organization_id): See :ref:`parsons-table` for output options. """ - tbl = Table(self._request(f"organizations/{organization_id}/tags")) - logger.info(f"Got {tbl.num_rows} tags for {organization_id} organization.") + tbl = Table(self._request(f'organizations/{organization_id}/tags')) + logger.info(f'Got {tbl.num_rows} tags for {organization_id} organization.') return tbl def get_tag(self, tag_id): @@ -536,6 +493,6 @@ def get_tag(self, tag_id): dict """ - r = self._request(f"tags/{tag_id}") - logger.info(f"Got {tag_id} tag.") + r = self._request(f'tags/{tag_id}') + logger.info(f'Got {tag_id} tag.') return r diff --git a/parsons/mailchimp/__init__.py b/parsons/mailchimp/__init__.py index 249b88edb2..03bd942385 100644 --- a/parsons/mailchimp/__init__.py +++ b/parsons/mailchimp/__init__.py @@ -1,3 +1,5 @@ from parsons.mailchimp.mailchimp import Mailchimp -__all__ = ["Mailchimp"] +__all__ = [ + 'Mailchimp' +] diff --git a/parsons/mailchimp/mailchimp.py b/parsons/mailchimp/mailchimp.py index 9ddb065093..eeeee3ccbd 100644 --- a/parsons/mailchimp/mailchimp.py +++ b/parsons/mailchimp/mailchimp.py @@ -7,7 +7,7 @@ logger = logging.getLogger(__name__) -class Mailchimp: +class Mailchimp(): """ Instantiate Mailchimp Class @@ -20,25 +20,16 @@ class Mailchimp: """ def __init__(self, api_key=None): - self.api_key = check_env.check("MAILCHIMP_API_KEY", api_key) + self.api_key = check_env.check('MAILCHIMP_API_KEY', api_key) self.domain = re.findall("(?<=-).+$", self.api_key)[0] - self.uri = f"https://{self.domain}.api.mailchimp.com/3.0/" - self.client = APIConnector(self.uri, auth=("x", self.api_key)) + self.uri = f'https://{self.domain}.api.mailchimp.com/3.0/' + self.client = APIConnector(self.uri, auth=('x', self.api_key)) - def get_lists( - self, - fields=None, - exclude_fields=None, - count=None, - offset=None, - before_date_created=None, - since_date_created=None, - before_campaign_last_sent=None, - since_campaign_last_sent=None, - email=None, - sort_field=None, - sort_dir=None, - ): + def get_lists(self, fields=None, exclude_fields=None, + count=None, offset=None, before_date_created=None, + since_date_created=None, before_campaign_last_sent=None, + since_campaign_last_sent=None, email=None, sort_field=None, + sort_dir=None): """ Get a table of lists under the account based on query parameters. 
Note that argument descriptions here are sourced from Mailchimp's official @@ -83,46 +74,32 @@ def get_lists( `Returns:` Table Class """ - params = { - "fields": fields, - "exclude_fields": exclude_fields, - "count": count, - "offset": offset, - "before_date_created": before_date_created, - "since_date_created": since_date_created, - "before_campaign_last_sent": before_campaign_last_sent, - "since_campaign_last_sent": since_campaign_last_sent, - "email": email, - "sort_field": sort_field, - "sort_dir": sort_dir, - } + params = {'fields': fields, + 'exclude_fields': exclude_fields, + 'count': count, + 'offset': offset, + 'before_date_created': before_date_created, + 'since_date_created': since_date_created, + 'before_campaign_last_sent': before_campaign_last_sent, + 'since_campaign_last_sent': since_campaign_last_sent, + 'email': email, + 'sort_field': sort_field, + 'sort_dir': sort_dir} - response = self.client.get_request("lists", params=params) - tbl = Table(response["lists"]) - logger.info(f"Found {tbl.num_rows} lists.") + response = self.client.get_request('lists', params=params) + tbl = Table(response['lists']) + logger.info(f'Found {tbl.num_rows} lists.') if tbl.num_rows > 0: return tbl else: return Table() - def get_campaigns( - self, - fields=None, - exclude_fields=None, - count=None, - offset=None, - type=None, - status=None, - before_send_time=None, - since_send_time=None, - before_create_time=None, - since_create_time=None, - list_id=None, - folder_id=None, - member_id=None, - sort_field=None, - sort_dir=None, - ): + def get_campaigns(self, fields=None, exclude_fields=None, + count=None, offset=None, type=None, status=None, + before_send_time=None, since_send_time=None, + before_create_time=None, since_create_time=None, + list_id=None, folder_id=None, member_id=None, + sort_field=None, sort_dir=None): """ Get a table of campaigns under the account based on query parameters. 
Note that argument descriptions here are sourced from Mailchimp's @@ -176,55 +153,39 @@ def get_campaigns( `Returns:` Table Class """ - params = { - "fields": fields, - "exclude_fields": exclude_fields, - "count": count, - "offset": offset, - "type": type, - "status": status, - "before_send_time": before_send_time, - "since_send_time": since_send_time, - "before_create_time": before_create_time, - "since_create_time": since_create_time, - "list_id": list_id, - "folder_id": folder_id, - "member_id": member_id, - "sort_field": sort_field, - "sort_dir": sort_dir, - } + params = {'fields': fields, + 'exclude_fields': exclude_fields, + 'count': count, + 'offset': offset, + 'type': type, + 'status': status, + 'before_send_time': before_send_time, + 'since_send_time': since_send_time, + 'before_create_time': before_create_time, + 'since_create_time': since_create_time, + 'list_id': list_id, + 'folder_id': folder_id, + 'member_id': member_id, + 'sort_field': sort_field, + 'sort_dir': sort_dir} - response = self.client.get_request("campaigns", params=params) - tbl = Table(response["campaigns"]) - logger.info(f"Found {tbl.num_rows} campaigns.") + response = self.client.get_request('campaigns', params=params) + tbl = Table(response['campaigns']) + logger.info(f'Found {tbl.num_rows} campaigns.') if tbl.num_rows > 0: return tbl else: return Table() - def get_members( - self, - list_id, - fields=None, - exclude_fields=None, - count=None, - offset=None, - email_type=None, - status=None, - since_timestamp_opt=None, - before_timestamp_opt=None, - since_last_changed=None, - before_last_changed=None, - unique_email_id=None, - vip_only=False, - interest_category_id=None, - interest_ids=None, - interest_match=None, - sort_field=None, - sort_dir=None, - since_last_campaign=None, - unsubscribed_since=None, - ): + def get_members(self, list_id, fields=None, + exclude_fields=None, count=None, offset=None, + email_type=None, status=None, since_timestamp_opt=None, + before_timestamp_opt=None, since_last_changed=None, + before_last_changed=None, unique_email_id=None, + vip_only=False, interest_category_id=None, + interest_ids=None, interest_match=None, sort_field=None, + sort_dir=None, since_last_campaign=None, + unsubscribed_since=None): """ Get a table of members in a list based on query parameters. 
Note that argument descriptions here are sourced from Mailchimp's official API @@ -305,45 +266,37 @@ def get_members( `Returns:` Table Class """ - params = { - "fields": fields, - "exclude_fields": exclude_fields, - "count": count, - "offset": offset, - "email_type": email_type, - "status": status, - "since_timestamp_opt": since_timestamp_opt, - "before_timestamp_opt": before_timestamp_opt, - "since_last_changed": since_last_changed, - "before_last_changed": before_last_changed, - "unqiue_email_id": unique_email_id, - "vip_only": vip_only, - "interest_category_id": interest_category_id, - "interest_ids": interest_ids, - "interest_match": interest_match, - "sort_field": sort_field, - "sort_dir": sort_dir, - "since_last_campaign": since_last_campaign, - "unsubscribed_since": unsubscribed_since, - } + params = {'fields': fields, + 'exclude_fields': exclude_fields, + 'count': count, + 'offset': offset, + 'email_type': email_type, + 'status': status, + 'since_timestamp_opt': since_timestamp_opt, + 'before_timestamp_opt': before_timestamp_opt, + 'since_last_changed': since_last_changed, + 'before_last_changed': before_last_changed, + 'unqiue_email_id': unique_email_id, + 'vip_only': vip_only, + 'interest_category_id': interest_category_id, + 'interest_ids': interest_ids, + 'interest_match': interest_match, + 'sort_field': sort_field, + 'sort_dir': sort_dir, + 'since_last_campaign': since_last_campaign, + 'unsubscribed_since': unsubscribed_since} - response = self.client.get_request(f"lists/{list_id}/members", params=params) - tbl = Table(response["members"]) - logger.info(f"Found {tbl.num_rows} members.") + response = self.client.get_request(f'lists/{list_id}/members', params=params) + tbl = Table(response['members']) + logger.info(f'Found {tbl.num_rows} members.') if tbl.num_rows > 0: return tbl else: return Table() - def get_campaign_emails( - self, - campaign_id, - fields=None, - exclude_fields=None, - count=None, - offset=None, - since=None, - ): + def get_campaign_emails(self, campaign_id, fields=None, + exclude_fields=None, count=None, offset=None, + since=None): """ Get a table of individual emails from a campaign based on query parameters. Note that argument descriptions here are sourced from @@ -373,26 +326,22 @@ def get_campaign_emails( `Returns:` Table Class """ - params = { - "fields": fields, - "exclude_fields": exclude_fields, - "count": count, - "offset": offset, - "since": since, - } + params = {'fields': fields, + 'exclude_fields': exclude_fields, + 'count': count, + 'offset': offset, + 'since': since} - response = self.client.get_request( - f"reports/{campaign_id}/email-activity", params=params - ) - tbl = Table(response["emails"]) + response = self.client.get_request(f'reports/{campaign_id}/email-activity', + params=params) + tbl = Table(response['emails']) if tbl.num_rows > 0: return tbl else: return Table() - def get_unsubscribes( - self, campaign_id, fields=None, exclude_fields=None, count=None, offset=None - ): + def get_unsubscribes(self, campaign_id, fields=None, + exclude_fields=None, count=None, offset=None): """ Get a table of unsubscribes associated with a campaign based on query parameters. 
Note that argument descriptions here are sourced from @@ -418,18 +367,15 @@ def get_unsubscribes( `Returns:` Table Class """ - params = { - "fields": fields, - "exclude_fields": exclude_fields, - "count": count, - "offset": offset, - } + params = {'fields': fields, + 'exclude_fields': exclude_fields, + 'count': count, + 'offset': offset} - response = self.client.get_request( - f"reports/{campaign_id}/unsubscribed", params=params - ) - tbl = Table(response["unsubscribes"]) - logger.info(f"Found {tbl.num_rows} unsubscribes for {campaign_id}.") + response = self.client.get_request(f'reports/{campaign_id}/unsubscribed', + params=params) + tbl = Table(response['unsubscribes']) + logger.info(f'Found {tbl.num_rows} unsubscribes for {campaign_id}.') if tbl.num_rows > 0: return tbl else: diff --git a/parsons/mobilize_america/__init__.py b/parsons/mobilize_america/__init__.py index 8e3b245e28..2d4105fb07 100644 --- a/parsons/mobilize_america/__init__.py +++ b/parsons/mobilize_america/__init__.py @@ -1,3 +1,5 @@ from parsons.mobilize_america.ma import MobilizeAmerica -__all__ = ["MobilizeAmerica"] +__all__ = [ + 'MobilizeAmerica' +] diff --git a/parsons/mobilize_america/ma.py b/parsons/mobilize_america/ma.py index 43e05d217a..6293bbab8e 100644 --- a/parsons/mobilize_america/ma.py +++ b/parsons/mobilize_america/ma.py @@ -5,11 +5,10 @@ import re import os import logging -import collections.abc logger = logging.getLogger(__name__) -MA_URI = "https://api.mobilize.us/v1/" +MA_URI = 'http://events.mobilizeamerica.io/api/v1/' class MobilizeAmerica(object): @@ -26,63 +25,62 @@ class MobilizeAmerica(object): def __init__(self, api_key=None): self.uri = MA_URI - self.api_key = api_key or os.environ.get("MOBILIZE_AMERICA_API_KEY") + self.api_key = api_key or os.environ.get('MOBILIZE_AMERICA_API_KEY') if not self.api_key: - logger.info( - "Mobilize America API Key missing. Calling methods that rely on private" - " endpoints will fail." - ) + logger.info('Mobilize America API Key missing. 
Calling methods that rely on private' + ' endpoints will fail.') - def _request(self, url, req_type="GET", post_data=None, args=None, auth=False): + def _request(self, url, req_type='GET', post_data=None, args=None, auth=False): if auth: if not self.api_key: - raise TypeError("This method requires an api key.") + raise TypeError('This method requires an api key.') else: - header = {"Authorization": "Bearer " + self.api_key} + header = {'Authorization': 'Bearer ' + self.api_key} else: header = None r = _request(req_type, url, json=post_data, params=args, headers=header) - r.raise_for_status() - - if "error" in r.json(): - raise ValueError("API Error:" + str(r.json()["error"])) + if 'error' in r.json(): + raise ValueError('API Error:' + str(r.json()['error'])) return r - def _request_paginate(self, url, req_type="GET", args=None, auth=False): + def _request_paginate(self, url, req_type='GET', args=None, auth=False): r = self._request(url, req_type=req_type, args=args, auth=auth) - json = r.json()["data"] + json = r.json()['data'] - while r.json()["next"]: + while r.json()['next']: - r = self._request(r.json()["next"], req_type=req_type, auth=auth) - json.extend(r.json()["data"]) + r = self._request(r.json()['next'], req_type=req_type) + json.extend(r.json()['data']) return json def _time_parse(self, time_arg): # Parse the date filters - trans = [(">=", "gte_"), (">", "gt_"), ("<=", "lte_"), ("<", "lt_")] + trans = [('>=', 'gte_'), + ('>', 'gt_'), + ('<=', 'lte_'), + ('<', 'lt_')] if time_arg: - time = re.sub("<=|<|>=|>", "", time_arg) + time = re.sub('<=|<|>=|>', '', time_arg) time = date_to_timestamp(time) - time_filter = re.search("<=|<|>=|>", time_arg).group() + time_filter = re.search('<=|<|>=|>', time_arg).group() for i in trans: if time_filter == i[0]: return i[1] + str(time) - raise ValueError("Invalid time operator. Must be one of >=, >, <= or >.") + raise ValueError('Invalid time operator. Must be one of >=, >, <= or >.') return time_arg @@ -98,40 +96,13 @@ def get_organizations(self, updated_since=None): See :ref:`parsons-table` for output options. """ - return Table( - self._request_paginate( - self.uri + "organizations", - args={"updated_since": date_to_timestamp(updated_since)}, - ) - ) - - def get_promoted_organizations(self, organization_id): - """ - Return all organizations promoted by the given organization. + return Table(self._request_paginate(self.uri + 'organizations', + args={ + 'updated_since': date_to_timestamp(updated_since) + })) - `Args:` - organization_id: int - ID of the organization to query. - `Returns` - Parsons Table - """ - url = ( - self.uri - + "organizations/" - + str(organization_id) - + "/promoted_organizations" - ) - return Table(self._request_paginate(url, auth=True)) - - def get_events( - self, - organization_id=None, - updated_since=None, - timeslot_start=None, - timeslot_end=None, - timeslots_table=False, - max_timeslots=None, - ): + def get_events(self, organization_id=None, updated_since=None, timeslot_start=None, + timeslot_end=None, timeslots_table=False, max_timeslots=None): """ Fetch all public events on the platform. @@ -162,58 +133,44 @@ def get_events( This is helpful in situations where you have a regular sync running and want to ensure that the column headers remain static. - If ``max_timeslots`` is 0, no timeslot columns will be included. - `Returns` - :ref:`parsons.Table `, dict, list[:ref:`parsons.Table `] + Parsons Table or dict or Parsons Tables + See :ref:`parsons-table` for output options. 
""" if isinstance(organization_id, (str, int)): organization_id = [organization_id] - args = { - "organization_id": organization_id, - "updated_since": date_to_timestamp(updated_since), - "timeslot_start": self._time_parse(timeslot_start), - "timeslot_end": self._time_parse(timeslot_end), - } + args = {'organization_id': organization_id, + 'updated_since': date_to_timestamp(updated_since), + 'timeslot_start': self._time_parse(timeslot_start), + 'timeslot_end': self._time_parse(timeslot_end)} - tbl = Table(self._request_paginate(self.uri + "events", args=args)) + tbl = Table(self._request_paginate(self.uri + 'events', args=args)) if tbl.num_rows > 0: - tbl.unpack_dict("sponsor") - tbl.unpack_dict("location", prepend=False) - tbl.unpack_dict("location", prepend=False) # Intentional duplicate - tbl.table = petl.convert(tbl.table, "address_lines", lambda v: " ".join(v)) + tbl.unpack_dict('sponsor') + tbl.unpack_dict('location', prepend=False) + tbl.unpack_dict('location', prepend=False) # Intentional duplicate + tbl.table = petl.convert(tbl.table, 'address_lines', lambda v: ' '.join(v)) if timeslots_table: - timeslots_tbl = tbl.long_table(["id"], "timeslots", "event_id") - return {"events": tbl, "timeslots": timeslots_tbl} - - elif max_timeslots == 0: - tbl.remove_column("timeslots") + timeslots_tbl = tbl.long_table(['id'], 'timeslots', 'event_id') + return {'events': tbl, 'timeslots': timeslots_tbl} else: - tbl.unpack_list("timeslots", replace=True, max_columns=max_timeslots) + tbl.unpack_list('timeslots', replace=True, max_columns=max_timeslots) cols = tbl.columns for c in cols: - if re.search("timeslots", c, re.IGNORECASE) is not None: + if re.search('timeslots', c, re.IGNORECASE) is not None: tbl.unpack_dict(c) - tbl.materialize() return tbl - def get_events_organization( - self, - organization_id, - updated_since=None, - timeslot_start=None, - timeslot_end=None, - timeslots_table=False, - max_timeslots=None, - ): + def get_events_organization(self, organization_id=None, updated_since=None, timeslot_start=None, + timeslot_end=None, timeslots_table=False, max_timeslots=None): """ Fetch all public events for an organization. This includes both events owned by the organization (as indicated by the organization field on the event object) @@ -223,8 +180,8 @@ def get_events_organization( API Key Required `Args:` - organization_id: int or str - Organization ID for the organization. + organization_id: list or int + Filter events by a single or multiple organization ids updated_since: str Filter to events updated since given date (ISO Date) timeslot_start: str @@ -269,48 +226,40 @@ def get_events_organization( This is helpful in situations where you have a regular sync running and want to ensure that the column headers remain static. - If ``max_timeslots`` is 0, no timeslot columns will be included. - `Returns` - :ref:`parsons.Table `, dict, list[:ref:`parsons.Table `] + Parsons Table or dict or Parsons Tables + See :ref:`parsons-table` for output options. 
""" - args = { - "updated_since": date_to_timestamp(updated_since), - "timeslot_start": self._time_parse(timeslot_start), - "timeslot_end": self._time_parse(timeslot_end), - } + if isinstance(organization_id, (str, int)): + organization_id = [organization_id] + + args = {'organization_id': organization_id, + 'updated_since': date_to_timestamp(updated_since), + 'timeslot_start': self._time_parse(timeslot_start), + 'timeslot_end': self._time_parse(timeslot_end), + } - tbl = Table( - self._request_paginate( - self.uri + "organizations/" + str(organization_id) + "/events", - args=args, - auth=True, - ) - ) + tbl = Table(self._request_paginate(self.uri + 'events', args=args, auth=True)) if tbl.num_rows > 0: - tbl.unpack_dict("sponsor") - tbl.unpack_dict("location", prepend=False) - tbl.unpack_dict("location", prepend=False) # Intentional duplicate - tbl.table = petl.convert(tbl.table, "address_lines", lambda v: " ".join(v)) + tbl.unpack_dict('sponsor') + tbl.unpack_dict('location', prepend=False) + tbl.unpack_dict('location', prepend=False) # Intentional duplicate + tbl.table = petl.convert(tbl.table, 'address_lines', lambda v: ' '.join(v)) if timeslots_table: - timeslots_tbl = tbl.long_table(["id"], "timeslots", "event_id") - return {"events": tbl, "timeslots": timeslots_tbl} - - elif max_timeslots == 0: - tbl.remove_column("timeslots") + timeslots_tbl = tbl.long_table(['id'], 'timeslots', 'event_id') + return {'events': tbl, 'timeslots': timeslots_tbl} else: - tbl.unpack_list("timeslots", replace=True, max_columns=max_timeslots) + tbl.unpack_list('timeslots', replace=True, max_columns=max_timeslots) cols = tbl.columns for c in cols: - if re.search("timeslots", c, re.IGNORECASE) is not None: + if re.search('timeslots', c, re.IGNORECASE) is not None: tbl.unpack_dict(c) - tbl.materialize() return tbl @@ -331,40 +280,33 @@ def get_events_deleted(self, organization_id=None, updated_since=None): if isinstance(organization_id, (str, int)): organization_id = [organization_id] - args = { - "organization_id": organization_id, - "updated_since": date_to_timestamp(updated_since), - } + args = {'organization_id': organization_id, + 'updated_since': date_to_timestamp(updated_since)} - return Table(self._request_paginate(self.uri + "events/deleted", args=args)) + return Table(self._request_paginate(self.uri + 'events/deleted', args=args)) - def get_people(self, organization_id, updated_since=None): + def get_people(self, organization_id=None, updated_since=None): """ - Fetch all people (volunteers) who are affiliated with an organization(s). + Fetch all people (volunteers) who are affiliated with the organization. .. note:: API Key Required `Args:` - organization_id: Iterable or int - Request people associated with a single or multiple organization ids + organization_id: list of int + Filter events by a single or multiple organization ids updated_since: str - Filter to people updated since given date (ISO Date) + Filter to events updated since given date (ISO Date) `Returns` Parsons Table See :ref:`parsons-table` for output options. 
""" - if isinstance(organization_id, collections.abc.Iterable): - data = Table() - for id in organization_id: - data.concat(self.get_people(id, updated_since)) - return data - else: - url = self.uri + "organizations/" + str(organization_id) + "/people" - args = {"updated_since": date_to_timestamp(updated_since)} - return Table(self._request_paginate(url, args=args, auth=True)) - def get_attendances(self, organization_id, updated_since=None): + url = self.uri + 'organizations/' + str(organization_id) + '/people' + args = {'updated_since': date_to_timestamp(updated_since)} + return Table(self._request_paginate(url, args=args, auth=True)) + + def get_attendances(self, organization_id=None, updated_since=None): """ Fetch all attendances which were either promoted by the organization or were for events owned by the organization. @@ -373,14 +315,15 @@ def get_attendances(self, organization_id, updated_since=None): API Key Required `Args:` - organization_id: int - Filter attendances by an organization id + organization_id: list of int + Filter events by a single or multiple organization ids updated_since: str - Filter to attendances updated since given date (ISO Date) + Filter to events updated since given date (ISO Date) `Returns` Parsons Table See :ref:`parsons-table` for output options. """ - url = self.uri + "organizations/" + str(organization_id) + "/attendances" - args = {"updated_since": date_to_timestamp(updated_since)} + + url = self.uri + 'organizations/' + str(organization_id) + '/attendances' + args = {'updated_since': date_to_timestamp(updated_since)} return Table(self._request_paginate(url, args=args, auth=True)) diff --git a/parsons/nation_builder/__init__.py b/parsons/nation_builder/__init__.py deleted file mode 100644 index 3520433033..0000000000 --- a/parsons/nation_builder/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from parsons.nation_builder.nation_builder import NationBuilder - -__all__ = ["NationBuilder"] diff --git a/parsons/nation_builder/nation_builder.py b/parsons/nation_builder/nation_builder.py deleted file mode 100644 index 3b30d4fa00..0000000000 --- a/parsons/nation_builder/nation_builder.py +++ /dev/null @@ -1,221 +0,0 @@ -import json -import logging -import time -from typing import Any, Dict, Optional, Tuple, cast -from urllib.parse import parse_qs, urlparse - -from parsons import Table -from parsons.utilities import check_env -from parsons.utilities.api_connector import APIConnector - -logger = logging.getLogger(__name__) - - -class NationBuilder: - """ - Instantiate the NationBuilder class - - `Args:` - slug: str - The Nation Builder slug Not required if ``NB_SLUG`` env variable set. The slug is the - nation slug of the nation from which your application is requesting approval to retrieve - data via the NationBuilder API. For example, your application's user could provide this - slug via a text field in your application. - access_token: str - The Nation Builder access_token Not required if ``NB_ACCESS_TOKEN`` env variable set. 
- """ - - def __init__( - self, slug: Optional[str] = None, access_token: Optional[str] = None - ) -> None: - slug = check_env.check("NB_SLUG", slug) - token = check_env.check("NB_ACCESS_TOKEN", access_token) - - headers = {"Content-Type": "application/json", "Accept": "application/json"} - headers.update(NationBuilder.get_auth_headers(token)) - - self.client = APIConnector(NationBuilder.get_uri(slug), headers=headers) - - @classmethod - def get_uri(cls, slug: Optional[str]) -> str: - if slug is None: - raise ValueError("slug can't None") - - if not isinstance(slug, str): - raise ValueError("slug must be an str") - - if len(slug.strip()) == 0: - raise ValueError("slug can't be an empty str") - - return f"https://{slug}.nationbuilder.com/api/v1" - - @classmethod - def get_auth_headers(cls, access_token: Optional[str]) -> Dict[str, str]: - if access_token is None: - raise ValueError("access_token can't None") - - if not isinstance(access_token, str): - raise ValueError("access_token must be an str") - - if len(access_token.strip()) == 0: - raise ValueError("access_token can't be an empty str") - - return {"authorization": f"Bearer {access_token}"} - - @classmethod - def parse_next_params(cls, next_value: str) -> Tuple[str, str]: - next_params = parse_qs(urlparse(next_value).query) - - if "__nonce" not in next_params: - raise ValueError("__nonce param not found") - - if "__token" not in next_params: - raise ValueError("__token param not found") - - nonce = next_params["__nonce"][0] - token = next_params["__token"][0] - - return nonce, token - - @classmethod - def make_next_url(cls, original_url: str, nonce: str, token: str) -> str: - return f"{original_url}?limit=100&__nonce={nonce}&__token={token}" - - def get_people(self) -> Table: - """ - `Returns:` - A Table of all people stored in Nation Builder. - """ - data = [] - original_url = "people" - - url = f"{original_url}" - - while True: - try: - logging.debug("sending request %s" % url) - response = self.client.get_request(url) - - res = response.get("results", None) - - if res is None: - break - - logging.debug("response got %s records" % len(res)) - - data.extend(res) - - if response.get("next", None): - nonce, token = NationBuilder.parse_next_params(response["next"]) - url = NationBuilder.make_next_url(original_url, nonce, token) - else: - break - except Exception as error: - logging.error("error requesting data from Nation Builder: %s" % error) - - wait_time = 30 - logging.info("waiting %d seconds before retrying" % wait_time) - time.sleep(wait_time) - - return Table(data) - - def update_person(self, person_id: str, person: Dict[str, Any]) -> Dict[str, Any]: - """ - This method updates a person with the provided id to have the provided data. It returns a - full representation of the updated person. - - `Args:` - person_id: str - Nation Builder person id. - data: dict - Nation builder person object. - For example {"email": "user@example.com", "tags": ["foo", "bar"]} - Docs: https://nationbuilder.com/people_api - `Returns:` - A person object with the updated data. 
- """ - if person_id is None: - raise ValueError("person_id can't None") - - if not isinstance(person_id, str): - raise ValueError("person_id must be a str") - - if len(person_id.strip()) == 0: - raise ValueError("person_id can't be an empty str") - - if not isinstance(person, dict): - raise ValueError("person must be a dict") - - url = f"people/{person_id}" - response = self.client.put_request(url, data=json.dumps({"person": person})) - response = cast(Dict[str, Any], response) - - return response - - def upsert_person( - self, person: Dict[str, Any] - ) -> Tuple[bool, Optional[Dict[str, Any]]]: - """ - Updates a matched person or creates a new one if the person doesn't exist. - - This method attempts to match the input person resource to a person already in the - nation. If a match is found, the matched person is updated. If a match is not found, a new - person is created. Matches are found by including one of the following IDs in the request: - - - civicrm_id - - county_file_id - - dw_id - - external_id - - email - - facebook_username - - ngp_id - - salesforce_id - - twitter_login - - van_id - - `Args:` - data: dict - Nation builder person object. - For example {"email": "user@example.com", "tags": ["foo", "bar"]} - Docs: https://nationbuilder.com/people_api - `Returns:` - A tuple of `created` and `person` object with the updated data. If the request fails - the method will return a tuple of `False` and `None`. - """ - - _required_keys = [ - "civicrm_id", - "county_file_id", - "dw_id", - "external_id", - "email", - "facebook_username", - "ngp_id", - "salesforce_id", - "twitter_login", - "van_id", - ] - - if not isinstance(person, dict): - raise ValueError("person must be a dict") - - has_required_key = any(x in person for x in _required_keys) - - if not has_required_key: - _keys = ", ".join(_required_keys) - raise ValueError(f"person dict must contain at least one key of {_keys}") - - url = "people/push" - response = self.client.request(url, "PUT", data=json.dumps({"person": person})) - - self.client.validate_response(response) - - if response.status_code == 200: - if self.client.json_check(response): - return (False, response.json()) - - if response.status_code == 201: - if self.client.json_check(response): - return (True, response.json()) - - return (False, None) diff --git a/parsons/newmode/__init__.py b/parsons/newmode/__init__.py index 161b0a1cc6..94f9007b30 100644 --- a/parsons/newmode/__init__.py +++ b/parsons/newmode/__init__.py @@ -1,3 +1,5 @@ from parsons.newmode.newmode import Newmode -__all__ = ["Newmode"] +__all__ = [ + 'Newmode' +] diff --git a/parsons/newmode/newmode.py b/parsons/newmode/newmode.py index 7b342528f5..a6f0e1323c 100644 --- a/parsons/newmode/newmode.py +++ b/parsons/newmode/newmode.py @@ -7,6 +7,7 @@ class Newmode: + def __init__(self, api_user=None, api_password=None, api_version=None): """ Args: @@ -22,13 +23,13 @@ def __init__(self, api_user=None, api_password=None, api_version=None): Returns: Newmode class """ - self.api_user = check_env.check("NEWMODE_API_USER", api_user) - self.api_password = check_env.check("NEWMODE_API_PASSWORD", api_password) + self.api_user = check_env.check('NEWMODE_API_USER', api_user) + self.api_password = check_env.check('NEWMODE_API_PASSWORD', api_password) if api_version is None: api_version = "v1.0" - self.api_version = check_env.check("NEWMODE_API_VERSION", api_version) + self.api_version = check_env.check('NEWMODE_API_VERSION', api_version) self.client = Client(api_user, api_password, api_version) @@ -98,7 +99,7 @@ def 
lookup_targets(self, tool_id, search=None, params={}): if targets: data = [] for key in targets: - if key != "_links": + if key != '_links': data.append(targets[key]) return self.convert_to_table(data) else: @@ -139,10 +140,10 @@ def run_action(self, tool_id, payload, params={}): """ action = self.client.runAction(tool_id, payload, params=params) if action: - if "link" in action: - return action["link"] + if 'link' in action: + return action['link'] else: - return action["sid"] + return action['sid'] else: logging.warning("Error in response") return None @@ -165,27 +166,6 @@ def get_target(self, target_id, params={}): logging.warning("Empty target returned") return None - def get_targets(self, params={}): - """ - Get all targets - - Args: - params dict: - Extra paramaters sent to New/Mode library - - Returns: - Target information - """ - - targets = self.client.getTargets(params=params) - - if targets: - return self.convert_to_table(targets) - - else: - logging.warning("No targets returned") - return None - def get_campaigns(self, params={}): """ Get existing campaigns. diff --git a/parsons/ngpvan/__init__.py b/parsons/ngpvan/__init__.py index 776ed434d0..146a0c5807 100644 --- a/parsons/ngpvan/__init__.py +++ b/parsons/ngpvan/__init__.py @@ -1,3 +1,5 @@ from parsons.ngpvan.van import VAN -__all__ = ["VAN"] +__all__ = [ + 'VAN' +] diff --git a/parsons/ngpvan/activist_codes.py b/parsons/ngpvan/activist_codes.py index fb6904d861..f30724d34e 100644 --- a/parsons/ngpvan/activist_codes.py +++ b/parsons/ngpvan/activist_codes.py @@ -8,6 +8,7 @@ class ActivistCodes(object): + def __init__(self, van_connection): self.connection = van_connection @@ -21,8 +22,8 @@ def get_activist_codes(self): See :ref:`parsons-table` for output options. """ - tbl = Table(self.connection.get_request("activistCodes")) - logger.info(f"Found {tbl.num_rows} activist codes.") + tbl = Table(self.connection.get_request('activistCodes')) + logger.info(f'Found {tbl.num_rows} activist codes.') return tbl def get_activist_code(self, activist_code_id): @@ -37,35 +38,26 @@ def get_activist_code(self, activist_code_id): The activist code """ - r = self.connection.get_request(f"activistCodes/{activist_code_id}") - logger.info(f"Found activist code {activist_code_id}.") + r = self.connection.get_request(f'activistCodes/{activist_code_id}') + logger.info(f'Found activist code {activist_code_id}.') return r - def toggle_activist_code( - self, id, activist_code_id, action, id_type="vanid", omit_contact=True - ): + def toggle_activist_code(self, id, activist_code_id, action, id_type='vanid'): # Internal method to apply/remove activist codes. Was previously a public method, - # but for the sake of simplicity, breaking out into two public methods. + # but for the sake of simplicity, breaking out into two public methods. 
- response = { - "activistCodeId": activist_code_id, - "action": action_parse(action), - "type": "activistCode", - "omitActivistCodeContactHistory": omit_contact, - } + response = {"activistCodeId": activist_code_id, + "action": action_parse(action), + "type": "activistCode"} - r = self.apply_response(id, response, id_type, omit_contact=omit_contact) + r = self.apply_response(id, response, id_type) - logger.info( - f"{id_type.upper()} {id} {action.capitalize()} " - + f"activist code {activist_code_id}" - ) + logger.info(f'{id_type.upper()} {id} {action.capitalize()} ' + + f'activist code {activist_code_id}') return r - def apply_activist_code( - self, id, activist_code_id, id_type="vanid", omit_contact=True - ): + def apply_activist_code(self, id, activist_code_id, id_type='vanid'): """ Apply an activist code to a person. @@ -79,18 +71,13 @@ def apply_activist_code( id_type: str A known person identifier type available on this VAN instance such as ``dwid`` - omit_contact: boolean - If set to false the contact history will be updated with a contact - attempt. Returns: ``None`` """ - return self.toggle_activist_code( - id, activist_code_id, "Apply", id_type=id_type, omit_contact=omit_contact - ) + return self.toggle_activist_code(id, activist_code_id, 'Apply', id_type=id_type) - def remove_activist_code(self, id, activist_code_id, id_type="vanid"): + def remove_activist_code(self, id, activist_code_id, id_type='vanid'): """ Remove an activist code from a person. @@ -108,6 +95,4 @@ def remove_activist_code(self, id, activist_code_id, id_type="vanid"): ``None`` """ - return self.toggle_activist_code( - id, activist_code_id, "Remove", id_type=id_type - ) + return self.toggle_activist_code(id, activist_code_id, 'Remove', id_type=id_type) diff --git a/parsons/ngpvan/bulk_import.py b/parsons/ngpvan/bulk_import.py index c0057f0678..944b754138 100644 --- a/parsons/ngpvan/bulk_import.py +++ b/parsons/ngpvan/bulk_import.py @@ -10,7 +10,9 @@ class BulkImport(object): + def __init__(self): + pass def get_bulk_import_resources(self): @@ -24,8 +26,8 @@ def get_bulk_import_resources(self): A list of resources. """ - r = self.connection.get_request("bulkImportJobs/resources") - logger.info(f"Found {len(r)} bulk import resources.") + r = self.connection.get_request('bulkImportJobs/resources') + logger.info(f'Found {len(r)} bulk import resources.') return r def get_bulk_import_job(self, job_id): @@ -40,8 +42,8 @@ def get_bulk_import_job(self, job_id): The bulk import job """ - r = self.connection.get_request(f"bulkImportJobs/{job_id}") - logger.info(f"Found bulk import job {job_id}.") + r = self.connection.get_request(f'bulkImportJobs/{job_id}') + logger.info(f'Found bulk import job {job_id}.') return r def get_bulk_import_job_results(self, job_id): @@ -62,8 +64,8 @@ def get_bulk_import_job_results(self, job_id): r = self.get_bulk_import_job(job_id) logger.info(f"Bulk Import Job Status: {r['status']}") - if r["status"] == "Completed": - return Table.from_csv(r["resultFiles"][0]["url"]) + if r['status'] == 'Completed': + return Table.from_csv(r['resultFiles'][0]['url']) return None @@ -76,8 +78,8 @@ def get_bulk_import_mapping_types(self): See :ref:`parsons-table` for output options.
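# A sketch of polling a bulk import job with the methods above; the job id is
# hypothetical, and results only become available once the job reports a
# 'Completed' status.
from parsons import VAN

van = VAN(db='MyCampaign')
job = van.get_bulk_import_job(54679)
if job['status'] == 'Completed':
    results = van.get_bulk_import_job_results(54679)  # Parsons Table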
""" - tbl = Table(self.connection.get_request("bulkImportMappingTypes")) - logger.info(f"Found {tbl.num_rows} bulk import mapping types.") + tbl = Table(self.connection.get_request('bulkImportMappingTypes')) + logger.info(f'Found {tbl.num_rows} bulk import mapping types.') return tbl def get_bulk_import_mapping_type(self, type_name): @@ -91,8 +93,8 @@ def get_bulk_import_mapping_type(self, type_name): A mapping type json """ - r = self.connection.get_request(f"bulkImportMappingTypes/{type_name}") - logger.info(f"Found {type_name} bulk import mapping type.") + r = self.connection.get_request(f'bulkImportMappingTypes/{type_name}') + logger.info(f'Found {type_name} bulk import mapping type.') return r def get_bulk_import_mapping_type_fields(self, type_name, field_name): @@ -109,63 +111,45 @@ def get_bulk_import_mapping_type_fields(self, type_name, field_name): A mapping type fields json """ - r = self.connection.get_request( - f"bulkImportMappingTypes/{type_name}/{field_name}/values" - ) - logger.info(f"Found {type_name} bulk import mapping type field values.") + r = self.connection.get_request(f'bulkImportMappingTypes/{type_name}/{field_name}/values') + logger.info(f'Found {type_name} bulk import mapping type field values.') return r - def post_bulk_import( - self, - tbl, - url_type, - resource_type, - mapping_types, - description, - result_fields=None, - **url_kwargs, - ): + def post_bulk_import(self, tbl, url_type, resource_type, mapping_types, + description, result_fields=None, **url_kwargs): # Internal method to post bulk imports. # Move to cloud storage file_name = str(uuid.uuid1()) - url = cloud_storage.post_file( - tbl, - url_type, - file_path=file_name + ".zip", - quoting=csv.QUOTE_ALL, - **url_kwargs, - ) - logger.info(f"Table uploaded to {url_type}.") + url = cloud_storage.post_file(tbl, + url_type, + file_path=file_name + '.zip', + quoting=csv.QUOTE_ALL, + **url_kwargs) + logger.info(f'Table uploaded to {url_type}.') # Generate request json - json = { - "description": description, - "file": { - "columnDelimiter": "csv", - "columns": [{"name": c} for c in tbl.columns], - "fileName": file_name + ".csv", - "hasHeader": "True", - "hasQuotes": "True", - "sourceUrl": url, - }, - "actions": [ - { - "resultFileSizeKbLimit": 5000, - "resourceType": resource_type, - "actionType": "loadMappedFile", - "mappingTypes": mapping_types, + json = {"description": description, + "file": { + "columnDelimiter": 'csv', + "columns": [{'name': c} for c in tbl.columns], + "fileName": file_name + '.csv', + "hasHeader": "True", + "hasQuotes": "True", + "sourceUrl": url}, + "actions": [{"resultFileSizeKbLimit": 5000, + "resourceType": resource_type, + "actionType": "loadMappedFile", + "mappingTypes": mapping_types}] } - ], - } if result_fields: - result_fields = [{"name": c} for c in result_fields] - json["actions"][0]["columnsToIncludeInResultsFile"] = result_fields + result_fields = [{'name': c} for c in result_fields] + json['actions'][0]['columnsToIncludeInResultsFile'] = result_fields - r = self.connection.post_request("bulkImportJobs", json=json) + r = self.connection.post_request('bulkImportJobs', json=json) logger.info(f"Bulk upload {r['jobId']} created.") - return r["jobId"] + return r['jobId'] def bulk_apply_activist_codes(self, tbl, url_type, **url_kwargs): """ @@ -208,14 +192,12 @@ def bulk_apply_activist_codes(self, tbl, url_type, **url_kwargs): The bulk import job id """ - return self.post_bulk_import( - tbl, - url_type, - "ContactsActivistCodes", - [{"name": "ActivistCode"}], - "Activist Code 
Upload", - **url_kwargs, - ) + return self.post_bulk_import(tbl, + url_type, + 'ContactsActivistCodes', + [{"name": "ActivistCode"}], + 'Activist Code Upload', + **url_kwargs) def bulk_upsert_contacts(self, tbl, url_type, result_fields=None, **url_kwargs): """ @@ -328,80 +310,53 @@ def bulk_upsert_contacts(self, tbl, url_type, result_fields=None, **url_kwargs): tbl = tbl.map_columns(COLUMN_MAP, exact_match=False) - return self.post_bulk_import( - tbl, - url_type, - "Contacts", - [{"name": "CreateOrUpdateContact"}], - "Create Or Update Contact Records", - result_fields=result_fields, - **url_kwargs, - ) - - def bulk_apply_suppressions(self, tbl, url_type, **url_kwargs): - """ - Bulk apply contact suppressions codes. + return self.post_bulk_import(tbl, + url_type, + 'Contacts', + [{'name': 'CreateOrUpdateContact'}], + 'Create Or Update Contact Records', + result_fields=result_fields, + **url_kwargs) - The table may include the following columns. The first column - must be ``vanid``. + def create_mapping_types(self, tbl): + # Internal method to generate the correct mapping types based on + # the columns passed in the table. Not in use yet. - .. list-table:: - :widths: 25 25 - :header-rows: 1 + mapping_types = [] - * - Column Name - - Required - - Description - * - ``vanid`` - - Yes - - A valid VANID primary key - * - ``suppressionid`` - - Yes - - A valid suppression id + # If one of the following columns is found in the table, then add + # that mapping type. + mp = [('firstname', '') + ('Email', 'Email'), + ('MailingAddress', 'MailingAddress'), + ('Phone', 'Phones'), + ('ApplyContactCustomFields', 'CustomFieldGroupId')] - `Args:` - table: Parsons table - A Parsons table. - url_type: str - The cloud file storage to use to post the file (``S3`` or ``GCS``). - See :ref:`Cloud Storage ` for more details. - **url_kwargs: kwargs - Arguments to configure your cloud storage url type. See - :ref:`Cloud Storage ` for more details. - `Returns:` - int - The bulk import job id - """ + for col in tbl.columns: + for i in mp: + if col.lower() == i[0].lower(): + mapping_types.append({'name': i[1]}) - return self.post_bulk_import( - tbl, - url_type, - "Contacts", - [{"name": "Suppressions"}], - "Apply Suppressions", - **url_kwargs, - ) + return mapping_types # This is a column mapper that is used to accept additional column names and provide # flexibility for the user. 
-COLUMN_MAP = { - "firstname": ["fn", "first"], - "middlename": ["mn", "middle"], - "lastname": ["ln", "last"], - "dob": ["dateofbirth", "birthdate"], - "sex": ["gender"], - "addressline1": ["address", "addressline1", "address1"], - "addressline2": ["addressline2", "address2"], - "addressline3": ["addressline3", "address3"], - "city": [], - "stateorprovince": ["state", "st"], - "countrycode": ["country"], - "displayasentered": [], - "cellphone": ["cell"], - "cellphonecountrycode": ["cellcountrycode"], - "phone": ["home", "homephone"], - "phonecountrycode": ["phonecountrycode"], - "email": ["emailaddress"], -} +COLUMN_MAP = {'firstname': ['fn', 'first'], + 'middlename': ['mn', 'middle'], + 'lastname': ['ln', 'last'], + 'dob': ['dateofbirth', 'birthdate'], + 'sex': ['gender'], + 'addressline1': ['address', 'addressline1', 'address1'], + 'addressline2': ['addressline2', 'address2'], + 'addressline3': ['addressline3', 'address3'], + 'city': [], + 'stateorprovince': ['state', 'st'], + 'countrycode': ['country'], + 'displayasentered': [], + 'cellphone': ['cell'], + 'cellphonecountrycode': ['cellcountrycode'], + 'phone': ['home', 'homephone'], + 'phonecountrycode': ['phonecountrycode'], + 'email': ['emailaddress']} diff --git a/parsons/ngpvan/canvass_responses.py b/parsons/ngpvan/canvass_responses.py index 2d1371ee79..a2c6a2f17e 100644 --- a/parsons/ngpvan/canvass_responses.py +++ b/parsons/ngpvan/canvass_responses.py @@ -7,6 +7,7 @@ class CanvassResponses(object): + def __init__(self, van_connection): self.connection = van_connection @@ -20,8 +21,8 @@ def get_canvass_responses_contact_types(self): See :ref:`parsons-table` for output options. """ - tbl = Table(self.connection.get_request("canvassResponses/contactTypes")) - logger.info(f"Found {tbl.num_rows} canvass response contact types.") + tbl = Table(self.connection.get_request('canvassResponses/contactTypes')) + logger.info(f'Found {tbl.num_rows} canvass response contact types.') return tbl def get_canvass_responses_input_types(self): @@ -33,8 +34,8 @@ def get_canvass_responses_input_types(self): See :ref:`parsons-table` for output options. """ - tbl = Table(self.connection.get_request("canvassResponses/inputTypes")) - logger.info(f"Found {tbl.num_rows} canvass response input types.") + tbl = Table(self.connection.get_request('canvassResponses/inputTypes')) + logger.info(f'Found {tbl.num_rows} canvass response input types.') return tbl def get_canvass_responses_result_codes(self): @@ -46,6 +47,6 @@ def get_canvass_responses_result_codes(self): See :ref:`parsons-table` for output options. 
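# The canvass response methods above return small lookup tables; a sketch,
# assuming a configured VAN connector, e.g. to find the result code id for
# 'Not Home' before applying a canvass result.
from parsons import VAN

van = VAN(db='MyVoters')
contact_types = van.get_canvass_responses_contact_types()
input_types = van.get_canvass_responses_input_types()
result_codes = van.get_canvass_responses_result_codes()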
""" - tbl = Table(self.connection.get_request("canvassResponses/resultCodes")) - logger.info(f"Found {tbl.num_rows} canvass response result codes.") + tbl = Table(self.connection.get_request('canvassResponses/resultCodes')) + logger.info(f'Found {tbl.num_rows} canvass response result codes.') return tbl diff --git a/parsons/ngpvan/changed_entities.py b/parsons/ngpvan/changed_entities.py index e71da4116f..98ec6d4a15 100644 --- a/parsons/ngpvan/changed_entities.py +++ b/parsons/ngpvan/changed_entities.py @@ -10,6 +10,7 @@ class ChangedEntities(object): + def __init__(self): pass @@ -22,8 +23,8 @@ def get_changed_entity_resources(self): list """ - r = self.connection.get_request("changedEntityExportJobs/resources") - logger.info(f"Found {len(r)} changed entity resources.") + r = self.connection.get_request('changedEntityExportJobs/resources') + logger.info(f'Found {len(r)} changed entity resources.') return r def get_changed_entity_resource_fields(self, resource_type): @@ -37,23 +38,12 @@ def get_changed_entity_resource_fields(self, resource_type): See :ref:`parsons-table` for output options. """ - tbl = Table( - self.connection.get_request( - f"changedEntityExportJobs/fields/{resource_type}" - ) - ) - logger.info(f"Found {tbl.num_rows} fields for {resource_type}.") + tbl = Table(self.connection.get_request(f'changedEntityExportJobs/fields/{resource_type}')) + logger.info(f'Found {tbl.num_rows} fields for {resource_type}.') return tbl - def get_changed_entities( - self, - resource_type, - date_from, - date_to=None, - include_inactive=False, - requested_fields=None, - custom_fields=None, - ): + def get_changed_entities(self, resource_type, date_from, date_to=None, include_inactive=False, + requested_fields=None, custom_fields=None): """ Get modified records for VAN from up to 90 days in the past. @@ -88,22 +78,22 @@ def get_changed_entities( "requestedFields": requested_fields, "requestedCustomFieldIds": custom_fields, "fileSizeKbLimit": 100000, - "includeInactive": include_inactive, + "includeInactive": include_inactive } - r = self.connection.post_request("changedEntityExportJobs", json=json) + r = self.connection.post_request('changedEntityExportJobs', json=json) while True: - status = self._get_changed_entity_job(r["exportJobId"]) - if status["jobStatus"] in ["Pending", "InProcess"]: - logger.info("Waiting on export file.") + status = self._get_changed_entity_job(r['exportJobId']) + if status['jobStatus'] in ['Pending', 'InProcess']: + logger.info('Waiting on export file.') time.sleep(RETRY_RATE) - elif status["jobStatus"] == "Complete": - return Table.from_csv(status["files"][0]["downloadUrl"]) + elif status['jobStatus'] == 'Complete': + return Table.from_csv(status['files'][0]['downloadUrl']) else: - raise ValueError(status["message"]) + raise ValueError(status['message']) def _get_changed_entity_job(self, job_id): - r = self.connection.get_request(f"changedEntityExportJobs/{job_id}") + r = self.connection.get_request(f'changedEntityExportJobs/{job_id}') return r diff --git a/parsons/ngpvan/codes.py b/parsons/ngpvan/codes.py index 40f0c32cdb..dd6be989df 100644 --- a/parsons/ngpvan/codes.py +++ b/parsons/ngpvan/codes.py @@ -6,13 +6,13 @@ class Codes(object): + def __init__(self, van_connection): self.connection = van_connection - def get_codes( - self, name=None, supported_entities=None, parent_code_id=None, code_type=None - ): + def get_codes(self, name=None, supported_entities=None, parent_code_id=None, + code_type=None): """ Get codes. 
@@ -30,16 +30,15 @@ def get_codes( See :ref:`parsons-table` for output options. """ - params = { - "name": name, - "supportedEntities": supported_entities, - "parentCodeId": parent_code_id, - "codeType": code_type, - "$top": 200, - } + params = {'name': name, + 'supportedEntities': supported_entities, + 'parentCodeId': parent_code_id, + 'codeType': code_type, + '$top': 200 + } - tbl = Table(self.connection.get_request("codes", params=params)) - logger.info(f"Found {tbl.num_rows} codes.") + tbl = Table(self.connection.get_request('codes', params=params)) + logger.info(f'Found {tbl.num_rows} codes.') return tbl def get_code(self, code_id): @@ -54,9 +53,9 @@ def get_code(self, code_id): See :ref:`parsons-table` for output options. """ - c = self.connection.get_request(f"codes/{code_id}") + c = self.connection.request(f'codes/{code_id}') logger.debug(c) - logger.info(f"Found code {code_id}.") + logger.info(f'Found code {code_id}.') return c def get_code_types(self): @@ -68,18 +67,12 @@ def get_code_types(self): A list of code types. """ - lst = self.connection.get_request("codeTypes") - logger.info(f"Found {len(lst)} code types.") + lst = self.connection.get_request('codeTypes') + logger.info(f'Found {len(lst)} code types.') return lst - def create_code( - self, - name=None, - parent_code_id=None, - description=None, - code_type="SourceCode", - supported_entities=None, - ): + def create_code(self, name=None, parent_code_id=None, description=None, + code_type='SourceCode', supported_entities=None): """ Create a code. @@ -113,39 +106,25 @@ def create_code( ] """ - json = { - "parentCodeId": parent_code_id, - "name": name, - "codeType": code_type, - "description": description, - } + json = {"parentCodeId": parent_code_id, + "name": name, + "codeType": code_type, + "description": description} if supported_entities: - se = [ - { - "name": s["name"], - "isSearchable": s["is_searchable"], - "isApplicable": s["is_applicable"], - } - for s in supported_entities - ] + se = [{'name': s['name'], + 'isSearchable': s['is_searchable'], + 'is_applicable': s['is_applicable']} for s in supported_entities] - json["supportedEntities"] = se + json['supportedEntities'] = se - r = self.connection.post_request("codes", json=json) - logger.info(f"Code {r} created.") + r = self.connection.post_request('codes', json=json) + logger.info(f'Code {r} created.') return r - def update_code( - self, - code_id, - name=None, - parent_code_id=None, - description=None, - code_type="SourceCode", - supported_entities=None, - ): + def update_code(self, code_id, name=None, parent_code_id=None, description=None, + code_type='SourceCode', supported_entities=None): """ Update a code. 
@@ -184,28 +163,23 @@ def update_code( post_data = {} if name: - post_data["name"] = name + post_data['name'] = name if parent_code_id: - post_data["parentCodeId"] = parent_code_id + post_data['parentCodeId'] = parent_code_id if code_type: - post_data["codeType"] = code_type + post_data['codeType'] = code_type if description: - post_data["description"] = description + post_data['description'] = description if supported_entities: - se = [ - { - "name": s["name"], - "isSearchable": s["is_searchable"], - "isApplicable": s["is_applicable"], - } - for s in supported_entities - ] - post_data["supportedEntities"] = se - - r = self.connection.put_request(f"codes/{code_id}", json=post_data) - logger.info(f"Code {code_id} updated.") + se = [{'name': s['name'], + 'isSearchable': s['is_searchable'], + 'is_applicable': s['is_applicable']} for s in supported_entities] + post_data['supportedEntities'] = se + + r = self.connection.put_request(f'codes/{code_id}', json=post_data) + logger.info(f'Code {code_id} updated.') return r def delete_code(self, code_id): @@ -219,8 +193,8 @@ def delete_code(self, code_id): ``None`` """ - r = self.connection.delete_request(f"codes/{code_id}") - logger.info(f"Code {code_id} deleted.") + r = self.connection.delete_request(f'codes/{code_id}') + logger.info(f'Code {code_id} deleted.') return r def get_code_supported_entities(self): @@ -232,6 +206,6 @@ def get_code_supported_entities(self): A list of code supported entities. """ - lst = self.connection.get_request("codes/supportedEntities") - logger.info(f"Found {len(lst)} code supported entities.") + lst = self.connection.get_request('codes/supportedEntities') + logger.info(f'Found {len(lst)} code supported entities.') return lst diff --git a/parsons/ngpvan/contact_notes.py b/parsons/ngpvan/contact_notes.py deleted file mode 100644 index 04a118ff43..0000000000 --- a/parsons/ngpvan/contact_notes.py +++ /dev/null @@ -1,56 +0,0 @@ -"""NGPVAN Contact Notes Endpoints""" - -from parsons.etl.table import Table -import logging - -logger = logging.getLogger(__name__) - - -class ContactNotes(object): - def __init__(self, van_connection): - self.connection = van_connection - - def get_contact_notes(self, van_id): - """ - Get custom fields. - - `Args:` - van_id : str - VAN ID for the person to get notes for. - `Returns:` - Parsons Table - See :ref:`parsons-table` for output options. - """ - - tbl = Table(self.connection.get_request(f"people/{van_id}/notes")) - logger.info(f"Found {tbl.num_rows} custom fields.") - return tbl - - def create_contact_note( - self, van_id, text, is_view_restricted, note_category_id=None - ): - """ - Create a contact note - - `Args:` - van_id: str - VAN ID for the person this note will be applied to. - text: str - The content of the note. - is_view_restricted: bool - Set to true if the note should be restricted only to certain users within - the current context; set to false if the note may be viewed by any user - in the current context. - note_category_id: int - Optional; if set, the note category for this note. - `Returns:` - int - The note ID. 
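# A usage sketch for the contact notes endpoints being removed here, as they
# behaved before this change; the VANID and note text are hypothetical.
from parsons import VAN

van = VAN(db='EveryAction')
note_id = van.create_contact_note(1234567, 'Prefers evening calls.',
                                  is_view_restricted=False)
notes = van.get_contact_notes(1234567)  # Parsons Table of the person's notes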
- """ - note = {"text": text, "isViewRestricted": is_view_restricted} - if note_category_id is not None: - note["category"] = {"noteCategoryId": note_category_id} - - r = self.connection.post_request(f"people/{van_id}/notes", json=note) - logger.info(f"Contact note {r} created.") - return r diff --git a/parsons/ngpvan/custom_fields.py b/parsons/ngpvan/custom_fields.py index 8fb439024a..13e7c5f50b 100644 --- a/parsons/ngpvan/custom_fields.py +++ b/parsons/ngpvan/custom_fields.py @@ -4,12 +4,13 @@ logger = logging.getLogger(__name__) -class CustomFields: +class CustomFields(): + def __init__(self, van_connection): self.connection = van_connection - def get_custom_fields(self, field_type="contacts"): + def get_custom_fields(self, field_type='contacts'): """ Get custom fields. @@ -22,13 +23,13 @@ def get_custom_fields(self, field_type="contacts"): See :ref:`parsons-table` for output options. """ - params = {"customFieldsGroupType": field_type.capitalize()} + params = {'customFieldsGroupType': field_type.capitalize()} - tbl = Table(self.connection.get_request("customFields", params=params)) - logger.info(f"Found {tbl.num_rows} custom fields.") + tbl = Table(self.connection.get_request('customFields', params=params)) + logger.info(f'Found {tbl.num_rows} custom fields.') return tbl - def get_custom_fields_values(self, field_type="contacts"): + def get_custom_fields_values(self, field_type='contacts'): """ Get custom field values as a long table. @@ -45,22 +46,16 @@ def get_custom_fields_values(self, field_type="contacts"): # Some custom fields do no have associated values. If this is the case then # we should return an empty Table, but with the expected columns. - if tbl.get_column_types("availableValues") == ["NoneType"]: - logger.info("Found 0 custom field values.") - return Table( - [ - { - "customFieldId": None, - "id": None, - "name": None, - "parentValueId": None, - } - ] - ) + if tbl.get_column_types('availableValues') == ['NoneType']: + logger.info('Found 0 custom field values.') + return Table([{'customFieldId': None, + 'id': None, + 'name': None, + 'parentValueId': None}]) else: - logger.info(f"Found {tbl.num_rows} custom field values.") - return tbl.long_table("customFieldId", "availableValues", prepend=False) + logger.info(f'Found {tbl.num_rows} custom field values.') + return tbl.long_table('customFieldId', 'availableValues', prepend=False) def get_custom_field(self, custom_field_id): """ @@ -73,6 +68,6 @@ def get_custom_field(self, custom_field_id): A json. 
""" - r = self.connection.get_request(f"customFields/{custom_field_id}") - logger.info(f"Found custom field {custom_field_id}.") + r = self.connection.get_request(f'customFields/{custom_field_id}') + logger.info(f'Found custom field {custom_field_id}.') return r diff --git a/parsons/ngpvan/events.py b/parsons/ngpvan/events.py index c0c5d17ab3..9635ef3de9 100644 --- a/parsons/ngpvan/events.py +++ b/parsons/ngpvan/events.py @@ -1,36 +1,22 @@ """NGPVAN Events Endpoints""" -import logging - from parsons.etl.table import Table +import logging logger = logging.getLogger(__name__) class Events(object): + def __init__(self, van_connection): self.connection = van_connection - def get_events( - self, - code_ids=None, - event_type_ids=None, - rep_event_id=None, - starting_after=None, - starting_before=None, - district_field=None, - expand_fields=[ - "locations", - "codes", - "shifts", - "roles", - "notes", - "financialProgram", - "ticketCategories", - "onlineForms", - ], - ): + def get_events(self, code_ids=None, event_type_ids=None, rep_event_id=None, + starting_after=None, starting_before=None, district_field=None, + expand_fields=['locations', 'codes', 'shifts', 'roles', 'notes', + 'financialProgram', 'ticketCategories', + 'onlineForms']): """ Get events. @@ -58,37 +44,25 @@ def get_events( """ if expand_fields: - expand_fields = ",".join(expand_fields) - - params = { - "codeIds": code_ids, - "eventTypeIds": event_type_ids, - "inRepetitionWithEventId": rep_event_id, - "startingAfter": starting_after, - "startingBefore": starting_before, - "districtFieldValue": district_field, - "$top": 50, - "$expand": expand_fields, - } - - tbl = Table(self.connection.get_request("events", params=params)) - logger.info(f"Found {tbl.num_rows} events.") + expand_fields = ','.join(expand_fields) + + params = {'codeIds': code_ids, + 'eventTypeIds': event_type_ids, + 'inRepetitionWithEventId': rep_event_id, + 'startingAfter': starting_after, + 'startingBefore': starting_before, + 'districtFieldValue': district_field, + '$top': 50, + '$expand': expand_fields + } + + tbl = Table(self.connection.get_request('events', params=params)) + logger.info(f'Found {tbl.num_rows} events.') return tbl - def get_event( - self, - event_id, - expand_fields=[ - "locations", - "codes", - "shifts", - "roles", - "notes", - "financialProgram", - "ticketCategories", - "voterRegistrationBatches", - ], - ): + def get_event(self, event_id, expand_fields=['locations', 'codes', 'shifts', 'roles', + 'notes', 'financialProgram', 'ticketCategories', + 'voterRegistrationBatches']): """ Get an event. 
@@ -106,32 +80,16 @@ def get_event( """ if expand_fields: - expand_fields = ",".join(expand_fields) + expand_fields = ','.join(expand_fields) - r = self.connection.get_request( - f"events/{event_id}", params={"$expand": expand_fields} - ) - logger.info(f"Found event {event_id}.") + r = self.connection.get_request(f'events/{event_id}', params={'$expand': expand_fields}) + logger.info(f'Found event {event_id}.') return r - def create_event( - self, - name, - short_name, - start_date, - end_date, - event_type_id, - roles, - shifts=None, - description=None, - editable=False, - publicly_viewable=False, - location_ids=None, - code_ids=None, - notes=None, - district_field_value=None, - voter_registration_batches=None, - ): + def create_event(self, name, short_name, start_date, end_date, event_type_id, + roles, shifts=None, description=None, editable=False, + publicly_viewable=False, location_ids=None, code_ids=None, notes=None, + district_field_value=None, voter_registration_batches=None): """ Create an event @@ -189,45 +147,37 @@ def create_event( """ if shifts is None: - shifts = [ - {"name": "Default Shift", "startTime": start_date, "endTime": end_date} - ] + shifts = [{'name': 'Default Shift', + 'startTime': start_date, + 'endTime': end_date}] else: - shifts = [ - { - "name": s["name"], - "startTime": s["start_time"], - "endTime": s["end_time"], - } - for s in shifts - ] - - event = { - "name": name, - "shortName": short_name, - "description": description, - "startDate": start_date, - "endDate": end_date, - "eventType": {"eventTypeId": event_type_id}, - "isOnlyEditableByCreatingUser": str(editable).lower(), - "isPubliclyViewable": publicly_viewable, - "notes": notes, - "shifts": shifts, - "roles": [{"roleId": r} for r in roles], - "districtFieldValue": district_field_value, - "voterRegistrationBatches": voter_registration_batches, - } + shifts = [{'name': s['name'], + 'startTime': s['start_time'], + 'endTime': s['end_time']} for s in shifts] + + event = {'name': name, + 'shortName': short_name, + 'description': description, + 'startDate': start_date, + 'endDate': end_date, + 'eventType': {'eventTypeId': event_type_id}, + 'isOnlyEditableByCreatingUser': str(editable).lower(), + 'isPubliclyViewable': publicly_viewable, + 'notes': notes, + 'shifts': shifts, + 'roles': [{'roleId': r} for r in roles], + 'districtFieldValue': district_field_value, + 'voterRegistrationBatches': voter_registration_batches + } if location_ids: - event["locations"] = ( - [{"locationId": location_id} for location_id in location_ids], - ) + event['locations'] = [{'locationId': l} for l in location_ids], # noqa E741 if code_ids: - event["codes"] = [{"codeID": c} for c in code_ids] + event['codes'] = [{'codeID': c} for c in code_ids] - r = self.connection.post_request("events", json=event) - logger.info(f"Event {r} created.") + r = self.connection.post_request('events', json=event) + logger.info(f'Event {r} created.') return r def delete_event(self, event_id): @@ -241,8 +191,8 @@ def delete_event(self, event_id): ``None`` """ - r = self.connection.delete_request(f"events/{event_id}") - logger.info(f"Event {event_id} deleted.") + r = self.connection.delete_request(f'events/{event_id}') + logger.info(f'Event {event_id} deleted.') return r def add_event_shift(self, event_id, shift_name, start_time, end_time): @@ -263,10 +213,13 @@ def add_event_shift(self, event_id, shift_name, start_time, end_time): The shift id. 
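# A sketch of adding a second shift to an existing event; the event id is
# hypothetical and the times are ISO 8601 strings.
from parsons import VAN

van = VAN(db='EveryAction')
shift_id = van.add_event_shift(750000984, 'Afternoon Shift',
                               '2021-10-09T12:00:00', '2021-10-09T15:00:00')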
""" - shift = {"name": shift_name, "startTime": start_time, "endTime": end_time} + shift = {'name': shift_name, + 'startTime': start_time, + 'endTime': end_time + } - r = self.connection.post_request(f"events/{event_id}/shifts", json=shift) - logger.info(f"Shift {r} added.") + r = self.connection.post_request(f'events/{event_id}/shifts', json=shift) + logger.info(f'Shift {r} added.') return r def get_event_types(self): @@ -278,6 +231,6 @@ def get_event_types(self): See :ref:`parsons-table` for output options. """ - tbl = Table(self.connection.get_request("events/types")) - logger.info(f"Found {tbl.num_rows} events.") + tbl = Table(self.connection.get_request('events/types')) + logger.info(f'Found {tbl.num_rows} events.') return tbl diff --git a/parsons/ngpvan/locations.py b/parsons/ngpvan/locations.py index c3a7537628..77bffa1098 100644 --- a/parsons/ngpvan/locations.py +++ b/parsons/ngpvan/locations.py @@ -7,6 +7,7 @@ class Locations(object): + def __init__(self, van_connection): self.connection = van_connection @@ -21,10 +22,10 @@ def get_locations(self, name=None): `Returns:` Parsons Table See :ref:`parsons-table` for output options. - """ + """ - tbl = Table(self.connection.get_request("locations", params={"name": name})) - logger.info(f"Found {tbl.num_rows} locations.") + tbl = Table(self.connection.get_request('locations', params={'name': name})) + logger.info(f'Found {tbl.num_rows} locations.') return self._unpack_loc(tbl) def get_location(self, location_id): @@ -38,19 +39,12 @@ def get_location(self, location_id): dict """ - r = self.connection.get_request(f"locations/{location_id}") - logger.info(f"Found location {location_id}.") + r = self.connection.get_request(f'locations/{location_id}') + logger.info(f'Found location {location_id}.') return r - def create_location( - self, - name, - address_line1=None, - address_line2=None, - city=None, - state=None, - zip_code=None, - ): + def create_location(self, name, address_line1=None, address_line2=None, city=None, + state=None, zip_code=None): """ Find or create a location. If location already exists, will return location id. @@ -72,19 +66,17 @@ def create_location( A location id. 
""" - location = { - "name": name, - "address": { - "addressLine1": address_line1, - "addressLine2": address_line2, - "city": city, - "stateOrProvince": state, - "zipOrPostalCode": zip_code, - }, - } - - r = self.connection.post_request("locations/findOrCreate", json=location) - logger.info(f"Location {r} created.") + location = {'name': name, + 'address': { + 'addressLine1': address_line1, + 'addressLine2': address_line2, + 'city': city, + 'stateOrProvince': state, + 'zipOrPostalCode': zip_code + }} + + r = self.connection.post_request('locations/findOrCreate', json=location) + logger.info(f'Location {r} created.') return r def delete_location(self, location_id): @@ -98,8 +90,8 @@ def delete_location(self, location_id): ``None`` """ - r = self.connection.delete_request(f"locations/{location_id}") - logger.info(f"Location {location_id} deleted.") + r = self.connection.delete_request(f'locations/{location_id}') + logger.info(f'Location {location_id} deleted.') return r def _unpack_loc(self, table): @@ -108,10 +100,10 @@ def _unpack_loc(self, table): if isinstance(table, tuple): return table - if "address" in table.columns: - table.unpack_dict("address", prepend=False) + if 'address' in table.columns: + table.unpack_dict('address', prepend=False) - if "geoLocation" in table.columns: - table.unpack_dict("geoLocation", prepend=False) + if 'geoLocation' in table.columns: + table.unpack_dict('geoLocation', prepend=False) return table diff --git a/parsons/ngpvan/people.py b/parsons/ngpvan/people.py index b195780a24..59cbee0059 100644 --- a/parsons/ngpvan/people.py +++ b/parsons/ngpvan/people.py @@ -5,22 +5,13 @@ class People(object): + def __init__(self, van_connection): self.connection = van_connection - def find_person( - self, - first_name=None, - last_name=None, - date_of_birth=None, - email=None, - phone=None, - phone_type=None, - street_number=None, - street_name=None, - zip=None, - ): + def find_person(self, first_name=None, last_name=None, date_of_birth=None, email=None, + phone=None, phone_type=None, street_number=None, street_name=None, zip=None): """ Find a person record. @@ -28,11 +19,11 @@ def find_person( Person find must include the following minimum combinations to conduct a search. - - first_name, last_name, email - - first_name, last_name, phone - - first_name, last_name, zip5, date_of_birth - - first_name, last_name, street_number, street_name, zip5 - - email_address + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip5, date_of_birth + - first_name, last_name, street_number, street_name, zip5 + - email_address `Args:` first_name: str @@ -55,7 +46,7 @@ def find_person( A person dict object """ - logger.info(f"Finding {first_name} {last_name}.") + logger.info(f'Finding {first_name} {last_name}.') return self._people_search( first_name=first_name, @@ -66,7 +57,7 @@ def find_person( phone_type=phone_type, street_number=street_number, street_name=street_name, - zip=zip, + zip=zip ) def find_person_json(self, match_json): @@ -77,11 +68,11 @@ def find_person_json(self, match_json): Person find must include the following minimum combinations to conduct a search. - - first_name, last_name, email - - first_name, last_name, phone - - first_name, last_name, zip5, date_of_birth - - first_name, last_name, street_number, street_name, zip5 - - email_address + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip5, date_of_birth + - first_name, last_name, street_number, street_name, zip5 + - email_address .. 
note:: A full list of possible values for the json, and its structure can be found @@ -95,24 +86,13 @@ def find_person_json(self, match_json): A person dict object """ - logger.info("Finding a match for json details.") + logger.info('Finding a match for json details.') return self._people_search(match_json=match_json) - def update_person( - self, - id=None, - id_type="vanid", - first_name=None, - last_name=None, - date_of_birth=None, - email=None, - phone=None, - phone_type=None, - street_number=None, - street_name=None, - zip=None, - ): + def update_person(self, id=None, id_type='vanid', first_name=None, last_name=None, + date_of_birth=None, email=None, phone=None, phone_type=None, + street_number=None, street_name=None, zip=None): """ Update a person record based on a provided ID. All other arguments provided will be updated on the record. @@ -161,10 +141,10 @@ def update_person( street_number=street_number, street_name=street_name, zip=zip, - create=True, + create=True ) - def update_person_json(self, id, id_type="vanid", match_json=None): + def update_person_json(self, id, id_type='vanid', match_json=None): """ Update a person record based on a provided ID within the match_json dict. @@ -184,33 +164,20 @@ def update_person_json(self, id, id_type="vanid", match_json=None): A person dict """ - return self._people_search( - id=id, id_type=id_type, match_json=match_json, create=True - ) + return self._people_search(id=id, id_type=id_type, match_json=match_json, create=True) - def upsert_person( - self, - first_name=None, - last_name=None, - date_of_birth=None, - email=None, - phone=None, - phone_type=None, - street_number=None, - street_name=None, - zip=None, - ): + def upsert_person(self, first_name=None, last_name=None, date_of_birth=None, email=None, + phone=None, phone_type=None, street_number=None, street_name=None, zip=None): """ Create or update a person record. .. note:: Person find must include the following minimum combinations. - - - first_name, last_name, email - - first_name, last_name, phone - - first_name, last_name, zip5, date_of_birth - - first_name, last_name, street_number, street_name, zip5 - - email_address + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip5, date_of_birth + - first_name, last_name, street_number, street_name, zip5 + - email_address .. warning:: This method can only be run on MyMembers, EveryAction, MyCampaign databases. @@ -249,7 +216,7 @@ def upsert_person( street_number=street_number, street_name=street_name, zip=zip, - create=True, + create=True ) def upsert_person_json(self, match_json): @@ -258,12 +225,11 @@ def upsert_person_json(self, match_json): .. note:: Person find must include the following minimum combinations. - - - first_name, last_name, email - - first_name, last_name, phone - - first_name, last_name, zip5, date_of_birth - - first_name, last_name, street_number, street_name, zip5 - - email_address + - first_name, last_name, email + - first_name, last_name, phone + - first_name, last_name, zip5, date_of_birth + - first_name, last_name, street_number, street_name, zip5 + - email_address .. 
note:: A full list of possible values for the json, and its structure can be found @@ -282,27 +248,15 @@ def upsert_person_json(self, match_json): return self._people_search(match_json=match_json, create=True) - def _people_search( - self, - id=None, - id_type=None, - first_name=None, - last_name=None, - date_of_birth=None, - email=None, - phone=None, - phone_type="H", - street_number=None, - street_name=None, - zip=None, - match_json=None, - create=False, - ): + def _people_search(self, id=None, id_type=None, first_name=None, last_name=None, + date_of_birth=None, email=None, phone=None, phone_type='H', + street_number=None, street_name=None, zip=None, match_json=None, + create=False): # Internal method to hit the people find/create endpoints addressLine1 = None if street_name and street_number: - addressLine1 = f"{street_number} {street_name}" + addressLine1 = f'{street_number} {street_name}' # Check to see if a match map has been provided if not match_json: @@ -310,37 +264,37 @@ def _people_search( # Will fail if empty dicts are provided, hence needed to add if exist if email: - json["emails"] = [{"email": email}] + json['emails'] = [{'email': email}] if phone: # To Do: Strip out non-integers from phone - json["phones"] = [{"phoneNumber": phone, "phoneType": phone_type}] + json['phones'] = [{'phoneNumber': phone, 'phoneType': phone_type}] if date_of_birth: - json["dateOfBirth"] = date_of_birth + json['dateOfBirth'] = date_of_birth if zip or addressLine1: - json["addresses"] = [{}] + json['addresses'] = [{}] if zip: - json["addresses"][0]["zipOrPostalCode"] = zip + json['addresses'][0]['zipOrPostalCode'] = zip if addressLine1: - json["addresses"][0]["addressLine1"] = addressLine1 + json['addresses'][0]['addressLine1'] = addressLine1 else: json = match_json - if "vanId" in match_json: - id = match_json["vanId"] + if 'vanId' in match_json: + id = match_json['vanId'] - url = "people/" + url = 'people/' if id: if create: - id_type = "" if id_type in ("vanid", None) else f"{id_type}:" + id_type = '' if id_type in ('vanid', None) else f"{id_type}:" url += id_type + str(id) else: return self.get_person(id, id_type=id_type) else: - url += "find" + url += 'find' if create: - url += "OrCreate" + url += 'OrCreate' else: # Ensure that the minimum combination of fields were passed json_flat = json_format.flatten_json(json) @@ -348,29 +302,17 @@ def _people_search( return self.connection.post_request(url, json=json) - def _valid_search( - self, - firstName=None, - lastName=None, - email=None, - phoneNumber=None, - dateOfBirth=None, - addressLine1=None, - zipOrPostalCode=None, - **kwargs, - ): + def _valid_search(self, firstName=None, lastName=None, email=None, phoneNumber=None, + dateOfBirth=None, addressLine1=None, zipOrPostalCode=None, **kwargs): # Internal method to check if a search is valid, kwargs are ignored - if ( - None in [firstName, lastName, email] - and None in [firstName, lastName, phoneNumber] - and None in [firstName, lastName, zipOrPostalCode, dateOfBirth] - and None in [firstName, lastName, addressLine1, zipOrPostalCode] - and None in [email] - ): + if (None in [firstName, lastName, email] and + None in [firstName, lastName, phoneNumber] and + None in [firstName, lastName, zipOrPostalCode, dateOfBirth] and + None in [firstName, lastName, addressLine1, zipOrPostalCode] and + None in [email]): - raise ValueError( - """ + raise ValueError(""" Person find must include the following minimum combinations to conduct a search. 
- first_name, last_name, email @@ -378,37 +320,17 @@ def _valid_search( - first_name, last_name, zip, dob - first_name, last_name, street_number, street_name, zip - email - """ - ) + """) return True - def get_person( - self, - id, - id_type="vanid", - expand_fields=[ - "contribution_history", - "addresses", - "phones", - "emails", - "codes", - "custom_fields", - "external_ids", - "preferences", - "recorded_addresses", - "reported_demographics", - "suppressions", - "cases", - "custom_properties", - "districts", - "election_records", - "membership_statuses", - "notes", - "organization_roles", - "disclosure_field_values", - ], - ): + def get_person(self, id, id_type='vanid', expand_fields=[ + 'contribution_history', 'addresses', 'phones', 'emails', + 'codes', 'custom_fields', 'external_ids', 'preferences', + 'recorded_addresses', 'reported_demographics', 'suppressions', + 'cases', 'custom_properties', 'districts', 'election_records', + 'membership_statuses', 'notes', 'organization_roles', + 'disclosure_field_values']): """ Returns a single person record using their VANID or external id. @@ -431,47 +353,24 @@ def get_person( """ # Change end point based on id type - url = "people/" + url = 'people/' - id_type = "" if id_type in ("vanid", None) else f"{id_type}:" + id_type = '' if id_type in ('vanid', None) else f"{id_type}:" url += id_type + str(id) + expand_fields = ','.join([json_format.arg_format(f) for f in expand_fields]) + # Removing the fields that are not returned in MyVoters - NOT_IN_MYVOTERS = ["codes", "contribution_history", "organization_roles"] + NOT_IN_MYVOTERS = ['codes', 'contribution_history', 'organization_roles'] if self.connection.db_code == 0: expand_fields = [v for v in expand_fields if v not in NOT_IN_MYVOTERS] - expand_fields = ",".join([json_format.arg_format(f) for f in expand_fields]) + logger.info(f'Getting person with {id_type} of {id} at url {url}') + return self.connection.get_request(url, params={'$expand': expand_fields}) - logger.info(f'Getting person with {id_type or "vanid"} of {id} at url {url}') - return self.connection.get_request(url, params={"$expand": expand_fields}) - - def delete_person(self, vanid): - """ - Suppress the given VANID in databases where contact records can be suppressed. - - `Args:` - vanid: str - The person's VAN ID. - `Returns:` - Success or error. - """ - url = f"people/{vanid}" - r = self.connection.delete_request(url) - logger.info(f"Van ID {vanid} suppressed.") - return r - - def apply_canvass_result( - self, - id, - result_code_id, - id_type="vanid", - contact_type_id=None, - input_type_id=None, - date_canvassed=None, - phone=None, - ): + def apply_canvass_result(self, id, result_code_id, id_type='vanid', contact_type_id=None, + input_type_id=None, date_canvassed=None): """ Apply a canvass result to a person. Use this end point for attempts that do not result in a survey response or an activist code (e.g. Not Home). @@ -491,35 +390,18 @@ def apply_canvass_result( `Optional`; Defaults to 11 (API Input) date_canvassed : str `Optional`; ISO 8601 formatted date. 
Defaults to today's date - phone: str - `Optional`; Phone number of any type (Work, Cell, Home) `Returns:` ``None`` """ - logger.info(f"Applying result code {result_code_id} to {id_type} {id}.") - self.apply_response( - id, - None, - id_type=id_type, - contact_type_id=contact_type_id, - input_type_id=input_type_id, - date_canvassed=date_canvassed, - result_code_id=result_code_id, - phone=phone, - ) + logger.info(f'Applying result code {result_code_id} to {id_type} {id}.') + self.apply_response(id, None, id_type=id_type, contact_type_id=contact_type_id, + input_type_id=input_type_id, date_canvassed=date_canvassed, + result_code_id=result_code_id) - def toggle_volunteer_action( - self, - id, - volunteer_activity_id, - action, - id_type="vanid", - result_code_id=None, - contact_type_id=None, - input_type_id=None, - date_canvassed=None, - ): + def toggle_volunteer_action(self, id, volunteer_activity_id, action, id_type='vanid', + result_code_id=None, contact_type_id=None, input_type_id=None, + date_canvassed=None): """ Apply or remove a volunteer action to or from a person. @@ -558,18 +440,8 @@ def toggle_volunteer_action( result_code_id) """ - def apply_response( - self, - id, - response, - id_type="vanid", - contact_type_id=None, - input_type_id=None, - date_canvassed=None, - result_code_id=None, - omit_contact=False, - phone=None, - ): + def apply_response(self, id, response, id_type='vanid', contact_type_id=None, + input_type_id=None, date_canvassed=None, result_code_id=None): """ Apply responses such as survey questions, activist codes, and volunteer actions to a person record. This method allows you to apply multiple responses (e.g. two survey @@ -597,13 +469,6 @@ def apply_response( date_canvassed : str `Optional`; ISO 8601 formatted date. Defaults to today's date responses : list or dict - The responses to apply. - omit_contact: boolean - Omit adding contact history to the response. This is particularly - useful when adding activist codes that are not based on contact - attempts.
- phone: str - `Optional`; Phone number of any type (Work, Cell, Home) `Returns:` ``True`` if successful @@ -614,40 +479,25 @@ def apply_response( "type": "ActivistCode"}, {"surveyQuestionId": 109149, "surveyResponseId": 465468, - "type": "SurveyResponse"} + "action": "SurveyResponse"} ] van.apply_response(5222, response) """ # noqa: E501,E261 # Set url based on id_type - if id_type == "vanid": + if id_type == 'vanid': url = f"people/{id}/canvassResponses" else: url = f"people/{id_type}:{id}/canvassResponses" - json = { - "canvassContext": { - "contactTypeId": contact_type_id, - "inputTypeId": input_type_id, - "dateCanvassed": date_canvassed, - "omitActivistCodeContactHistory": omit_contact, - }, - "resultCodeId": result_code_id, - } - - if contact_type_id == 1 or contact_type_id == 37: - if phone: - json["canvassContext"]["phone"] = { - "dialingPrefix": "1", - "phoneNumber": phone, - } - else: - raise Exception( - "A phone number must be provided if canvassed via phone or SMS" - ) + json = {"canvassContext": { + "contactTypeId": contact_type_id, + "inputTypeId": input_type_id, + "dateCanvassed": date_canvassed}, + "resultCodeId": result_code_id} if response: - json["responses"] = response + json['responses'] = response if result_code_id is not None and response is not None: raise ValueError("Both result_code_id and responses cannot be specified.") @@ -656,7 +506,8 @@ def apply_response( json["responses"] = [response] if result_code_id is not None and response is not None: - raise ValueError("Both result_code_id and responses cannot be specified.") + raise ValueError( + "Both result_code_id and responses cannot be specified.") return self.connection.post_request(url, json=json) @@ -675,12 +526,13 @@ def create_relationship(self, vanid_1, vanid_2, relationship_id): ``None`` """ - json = {"relationshipId": relationship_id, "vanId": vanid_2} + json = {'relationshipId': relationship_id, + 'vanId': vanid_2} self.connection.post_request(f"people/{vanid_1}/relationships", json=json) - logger.info(f"Relationship {vanid_1} to {vanid_2} created.") + logger.info(f'Relationship {vanid_1} to {vanid_2} created.') - def apply_person_code(self, id, code_id, id_type="vanid"): + def apply_person_code(self, id, code_id, id_type='vanid'): """ Apply a code to a person. @@ -697,7 +549,7 @@ def apply_person_code(self, id, code_id, id_type="vanid"): """ # Set url based on id_type - if id_type == "vanid": + if id_type == 'vanid': url = f"people/{id}/codes" else: url = f"people/{id_type}:{id}/codes" @@ -705,29 +557,4 @@ def apply_person_code(self, id, code_id, id_type="vanid"): json = {"codeId": code_id} self.connection.post_request(url, json=json) - logger.info(f"Code {code_id} applied to person id {id}.") - - def merge_contacts(self, primary_vanid, source_vanid): - """ - Merges two contacts in EveryAction. The source contact record will be - deleted as part of the merge and its data will be moved into the primary - contact record. In cases where fields conflict between the two contacts - and we can't keep both values, such as if the contacts have different - first names, the primary contact record's data will be retained. For - more information see the - `VAN API documentation here `_ - - `Args:` - primary_vanid: str - The VANID of the primary contact record. - source_vanid: str - The VANID of the source contact record. - `Returns:` - The VANID of the primary contact record. 
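# A sketch of the merge helper being removed here, as it behaved before this
# change: the source record is deleted and its data folded into the primary
# record. Both VANIDs are hypothetical.
from parsons import VAN

van = VAN(db='EveryAction')
primary_id = van.merge_contacts(primary_vanid=1011971, source_vanid=1011972)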
- """ - - url = f"people/{source_vanid}/mergeInto" - json = {"vanId": primary_vanid} - - r = self.connection.put_request(url, json=json) - return r + logger.info(f'Code {code_id} applied to person id {id}.') diff --git a/parsons/ngpvan/printed_lists.py b/parsons/ngpvan/printed_lists.py deleted file mode 100644 index 4aae931626..0000000000 --- a/parsons/ngpvan/printed_lists.py +++ /dev/null @@ -1,62 +0,0 @@ -"""NGPVAN Saved List Endpoints""" - -from parsons.etl.table import Table -import logging - -logger = logging.getLogger(__name__) - - -class PrintedLists(object): - def __init__(self, van_connection): - - self.connection = van_connection - - def get_printed_lists( - self, - generated_after=None, - generated_before=None, - created_by=None, - folder_name=None, - turf_name=None, - ): - """ - Get printed lists. - - `Args:` - folder_id: int - Filter by the id for a VAN folder. If included returns only - the saved lists in the folder - `Returns:` - Parsons Table - See :ref:`parsons-table` for output options. - """ - - params = { - "generatedAfter": generated_after, - "generatedBefore": generated_before, - "createdBy": created_by, - "folderName": folder_name, - "turfName": turf_name, - } - - params = {key: value for key, value in params.items() if value is not None} - - tbl = Table(self.connection.get_request("printedLists", params=params)) - - logger.info(f"Found {tbl.num_rows} printed lists.") - return tbl - - def get_printed_list(self, printed_list_number): - """ - Returns a printed list object. - - `Args:` - printed_list_number: int - The printed list number - `Returns:` - dict - """ - - r = self.connection.get_request(f"printedLists/{printed_list_number}") - logger.info(f"Found printed list {printed_list_number}.") - return r diff --git a/parsons/ngpvan/saved_lists.py b/parsons/ngpvan/saved_lists.py index 5423e64404..cf8d731d7e 100644 --- a/parsons/ngpvan/saved_lists.py +++ b/parsons/ngpvan/saved_lists.py @@ -10,6 +10,7 @@ class SavedLists(object): + def __init__(self, van_connection): self.connection = van_connection @@ -27,10 +28,8 @@ def get_saved_lists(self, folder_id=None): See :ref:`parsons-table` for output options. """ - tbl = Table( - self.connection.get_request("savedLists", params={"folderId": folder_id}) - ) - logger.info(f"Found {tbl.num_rows} saved lists.") + tbl = Table(self.connection.get_request('savedLists', params={'folderId': folder_id})) + logger.info(f'Found {tbl.num_rows} saved lists.') return tbl def get_saved_list(self, saved_list_id): @@ -44,8 +43,8 @@ def get_saved_list(self, saved_list_id): dict """ - r = self.connection.get_request(f"savedLists/{saved_list_id}") - logger.info(f"Found saved list {saved_list_id}.") + r = self.connection.get_request(f'savedLists/{saved_list_id}') + logger.info(f'Found saved list {saved_list_id}.') return r def download_saved_list(self, saved_list_id): @@ -66,24 +65,12 @@ def download_saved_list(self, saved_list_id): if isinstance(job, tuple): return job else: - return Table.from_csv(job["downloadUrl"]) - - def upload_saved_list_rest( - self, - tbl, - url_type, - folder_id, - list_name, - description, - callback_url, - columns, - id_column, - delimiter="csv", - header=True, - quotes=True, - overwrite=None, - **url_kwargs, - ): + return Table.from_csv(job['downloadUrl']) + + def upload_saved_list_rest(self, tbl, url_type, folder_id, list_name, + description, callback_url, columns, id_column, + delimiter='csv', header=True, quotes=True, + overwrite=None, **url_kwargs): """ Upload a saved list. 
Invalid or unmatched person id records will be ignored. Your api user must be shared on the target folder. @@ -115,8 +102,7 @@ def upload_saved_list_rest( Whether or not fields are enclosed in quotation marks within each column of the file. overwrite: int - Replace saved list if already exists. Pass in the list id of the - existing list that you would like to overwrite. + Replace saved list if already exists. **url_kwargs: kwargs Arguments to configure your cloud storage url type. See :ref:`Cloud Storage ` for more details. @@ -126,79 +112,60 @@ def upload_saved_list_rest( records in your list. """ rando = str(uuid.uuid1()) - file_name = rando + ".csv" - url = cloud_storage.post_file( - tbl, url_type, file_path=rando + ".zip", **url_kwargs - ) - logger.info(f"Table uploaded to {url_type}.") + file_name = rando + '.csv' + url = cloud_storage.post_file(tbl, url_type, file_path=rando + '.zip', **url_kwargs) + url_for_van = url.split('?')[0] # hack around github.com/move-coop/parsons/issues/513 + logger.info(f'Table uploaded to {url_type}.') # VAN errors for this method are not particularly useful or helpful. For that reason, we # will check that the folder exists and if the list already exists. - logger.info("Validating folder id and list name.") - if folder_id not in [x["folderId"] for x in self.get_folders()]: + logger.info('Validating folder id and list name.') + if folder_id not in [x['folderId'] for x in self.get_folders()]: raise ValueError("Folder does not exist or is not shared with API user.") - if ( - list_name in [x["name"] for x in self.get_saved_lists(folder_id)] - and not overwrite - ): - raise ValueError( - "Saved list already exists. Set overwrite " - "argument to list ID or change list name." - ) + if list_name in [x['name'] for x in self.get_saved_lists(folder_id)]: + raise ValueError("Saved list already exists. Set overwrite " + "argument to list ID or change list name.") - if delimiter not in ["csv", "tab", "pipe"]: + if delimiter not in ['csv', 'tab', 'pipe']: raise ValueError("Delimiter must be one of 'csv', 'tab' or 'pipe'") - columns = [{"name": c} for c in columns] + columns = [{'name': c} for c in columns] delimiter = delimiter.capitalize() - json = { - "description": description, - "file": { - "columnDelimiter": delimiter, - "columns": columns, - "fileName": file_name, - "hasHeader": header, - "hasQuotes": quotes, - "sourceUrl": url, - }, - "actions": [ - { - "actionType": "LoadSavedListFile", - "listDescription": description, - "listName": list_name, - "personIdColumn": id_column, - "folderId": folder_id, - "personIdType": "VANID", + json = {"description": description, + "file": { + "columnDelimiter": delimiter, + "columns": columns, + "fileName": file_name, + "hasHeader": header, + "hasQuotes": quotes, + "sourceUrl": url_for_van + }, + "actions": [ + {"actionType": "LoadSavedListFile", + "listDescription": description, + "listName": list_name, + "personIdColumn": id_column, + "folderId": folder_id, + "personIdType": "VANID"}], + "listeners": [ + {"type": "URL", + "value": callback_url}] } - ], - "listeners": [{"type": "URL", "value": callback_url}], - } if overwrite: json["actions"][0]["overwriteExistingListId"] = overwrite - file_load_job_response = self.connection.post_request( - "fileLoadingJobs", json=json - ) - job_id = file_load_job_response["jobId"] - logger.info( - f"Saved list job {job_id} created. 
Reference " - "callback url to check for job status" - ) + logger.info(json) + file_load_job_response = self.connection.post_request('fileLoadingJobs', json=json) + job_id = file_load_job_response['jobId'] + logger.info(f'Score loading job {job_id} created. Reference ' + 'callback url to check for job status') return file_load_job_response - def upload_saved_list( - self, - tbl, - list_name, - folder_id, - url_type, - id_type="vanid", - replace=False, - **url_kwargs, - ): + def upload_saved_list(self, tbl, list_name, folder_id, url_type, id_type='vanid', replace=False, + **url_kwargs): """ .. warning:: .. deprecated:: 0.X Use :func:`parsons.VAN.upload_saved_list_rest` instead. @@ -231,69 +198,58 @@ def upload_saved_list( """ # Move to cloud storage file_name = str(uuid.uuid1()) - url = cloud_storage.post_file( - tbl, url_type, file_path=file_name + ".zip", **url_kwargs - ) - logger.info(f"Table uploaded to {url_type}.") + url = cloud_storage.post_file(tbl, url_type, file_path=file_name + '.zip', **url_kwargs) + logger.info(f'Table uploaded to {url_type}.') # VAN errors for this method are not particularly useful or helpful. For that reason, we # will check that the folder exists and if the list already exists. - logger.info("Validating folder id and list name.") - if folder_id not in [x["folderId"] for x in self.get_folders()]: + logger.info('Validating folder id and list name.') + if folder_id not in [x['folderId'] for x in self.get_folders()]: raise ValueError("Folder does not exist or is not shared with API user.") if not replace: - if list_name in [x["name"] for x in self.get_saved_lists(folder_id)]: - raise ValueError( - "Saved list already exists. Set to replace argument to True or " - "change list name." - ) + if list_name in [x['name'] for x in self.get_saved_lists(folder_id)]: + raise ValueError("Saved list already exists. Set to replace argument to True or " + "change list name.") # i think we dont need this if we have the warning in the funciton description, # perhapse a style/standanrds decision - if id_type == "vanid": - logger.warning( - "The NVPVAN SOAP API is deprecated, consider using " - "parsons.VAN.upload_saved_list_rest if you are " - "uploading a list of vanids." 
- ) + if id_type == 'vanid': + logger.warning('The NVPVAN SOAP API is deprecated, consider using ' + 'parsons.VAN.upload_saved_list_rest if you are ' + 'uploading a list of vanids.') # Create XML - xml = self.connection.soap_client.factory.create( - "CreateAndStoreSavedListMetaData" - ) + xml = self.connection.soap_client.factory.create('CreateAndStoreSavedListMetaData') xml.SavedList._Name = list_name xml.DestinationFolder._ID = folder_id - xml.SourceFile.FileName = file_name + ".csv" + xml.SourceFile.FileName = file_name + '.csv' xml.SourceFile.FileUrl = url - xml.SourceFile.FileCompression = "zip" + xml.SourceFile.FileCompression = 'zip' xml.Options.OverwriteExistingList = replace # Describe file - file_desc = self.connection.soap_client.factory.create( - "SeparatedFileFormatDescription" - ) - file_desc._name = "csv" + file_desc = self.connection.soap_client.factory.create('SeparatedFileFormatDescription') + file_desc._name = 'csv' file_desc.HasHeaderRow = True # Only support single column for now - col = self.connection.soap_client.factory.create("Column") + col = self.connection.soap_client.factory.create('Column') col.Name = id_type - col.RefersTo._Path = f"Person[@PersonIDType='{id_type}']" - col._Index = "0" + col.RefersTo._Path = f"Person[@PersonIDType=\'{id_type}\']" + col._Index = '0' # Assemble request file_desc.Columns.Column.append(col) xml.SourceFile.Format = file_desc - r = Client.dict( - self.connection.soap_client.service.CreateAndStoreSavedList(xml) - ) + r = Client.dict(self.connection.soap_client.service.CreateAndStoreSavedList(xml)) if r: logger.info(f"Uploaded {r['ListSize']} records to {r['_Name']} saved list.") return r class Folders(object): + def __init__(self, van_connection): # Some sort of test if the van_connection is not present. @@ -309,8 +265,8 @@ def get_folders(self): See :ref:`parsons-table` for output options. """ - tbl = Table(self.connection.get_request("folders")) - logger.info(f"Found {tbl.num_rows} folders.") + tbl = Table(self.connection.get_request('folders')) + logger.info(f'Found {tbl.num_rows} folders.') return tbl def get_folder(self, folder_id): @@ -325,12 +281,13 @@ def get_folder(self, folder_id): See :ref:`parsons-table` for output options. """ - r = self.connection.get_request(f"folders/{folder_id}") - logger.info(f"Found folder {folder_id}.") + r = self.connection.get_request(f'folders/{folder_id}') + logger.info(f'Found folder {folder_id}.') return r class ExportJobs(object): + def __init__(self, van_connection): self.connection = van_connection @@ -344,13 +301,12 @@ def get_export_job_types(self): See :ref:`parsons-table` for output options. 
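A minimal sketch of the REST saved-list upload shown above, assuming a VAN API key in the VAN_API_KEY environment variable and working S3 credentials for the cloud-storage hand-off; the folder id, list name, bucket, and callback URL are illustrative placeholders, not values from this diff.

from parsons import Table, VAN

van = VAN(db="MyVoters")

# A one-column table of vanids; the header row names the id column.
tbl = Table([{"vanid": 12345}, {"vanid": 67890}])

van.upload_saved_list_rest(
    tbl,
    url_type="S3",                                # passed through to cloud_storage.post_file
    folder_id=1111,                               # must be shared with the API user
    list_name="Parsons Upload Example",
    description="Uploaded via Parsons",
    callback_url="https://example.com/callback",  # placeholder listener URL
    columns=["vanid"],
    id_column="vanid",
    bucket="my-bucket",                           # S3-specific url_kwarg (assumed)
)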
""" - tbl = Table(self.connection.get_request("exportJobTypes")) - logger.info(f"Found {tbl.num_rows} export job types.") + tbl = Table(self.connection.get_request('exportJobTypes')) + logger.info(f'Found {tbl.num_rows} export job types.') return tbl - def export_job_create( - self, list_id, export_type=4, webhookUrl="https://www.nothing.com" - ): + def export_job_create(self, list_id, export_type=4, + webhookUrl="https://www.nothing.com"): """ Creates an export job @@ -370,14 +326,13 @@ def export_job_create( The export job object """ - json = { - "savedListId": str(list_id), - "type": str(export_type), - "webhookUrl": webhookUrl, - } + json = {"savedListId": str(list_id), + "type": str(export_type), + "webhookUrl": webhookUrl + } - r = self.connection.post_request("exportJobs", json=json) - logger.info("Retrieved export job.") + r = self.connection.post_request('exportJobs', json=json) + logger.info('Retrieved export job.') return r def get_export_job(self, export_job_id): @@ -392,6 +347,6 @@ def get_export_job(self, export_job_id): See :ref:`parsons-table` for output options. """ - r = self.connection.get_request(f"exportJobs/{export_job_id}") - logger.info(f"Found export job {export_job_id}.") + r = self.connection.get_request(f'exportJobs/{export_job_id}') + logger.info(f'Found export job {export_job_id}.') return r diff --git a/parsons/ngpvan/scores.py b/parsons/ngpvan/scores.py index 1a652d513b..6216802abd 100644 --- a/parsons/ngpvan/scores.py +++ b/parsons/ngpvan/scores.py @@ -10,6 +10,7 @@ class Scores(object): + def __init__(self, van_connection): self.connection = van_connection @@ -23,8 +24,8 @@ def get_scores(self): See :ref:`parsons-table` for output options. """ - tbl = Table(self.connection.get_request("scores")) - logger.info(f"Found {tbl.num_rows} scores.") + tbl = Table(self.connection.get_request('scores')) + logger.info(f'Found {tbl.num_rows} scores.') return tbl def get_score(self, score_id): @@ -38,8 +39,8 @@ def get_score(self, score_id): dict """ - r = self.connection.get_request(f"scores/{score_id}") - logger.info(f"Found score {score_id}.") + r = self.connection.get_request(f'scores/{score_id}') + logger.info(f'Found score {score_id}.') return r def get_score_updates(self, created_before=None, created_after=None, score_id=None): @@ -58,17 +59,15 @@ def get_score_updates(self, created_before=None, created_after=None, score_id=No See :ref:`parsons-table` for output options. 
""" - params = { - "createdBefore": created_before, - "createdAfter": created_after, - "scoreId": score_id, - } + params = {'createdBefore': created_before, + 'createdAfter': created_after, + 'scoreId': score_id} - tbl = Table(self.connection.get_request("scoreUpdates", params=params)) + tbl = Table(self.connection.get_request('scoreUpdates', params=params)) if tbl.num_rows: - tbl.unpack_dict("updateStatistics", prepend=False) - tbl.unpack_dict("score", prepend=False) - logger.info(f"Found {tbl.num_rows} score updates.") + tbl.unpack_dict('updateStatistics', prepend=False) + tbl.unpack_dict('score', prepend=False) + logger.info(f'Found {tbl.num_rows} score updates.') return tbl def get_score_update(self, score_update_id): @@ -82,8 +81,8 @@ def get_score_update(self, score_update_id): dict """ - r = self.connection.get_request(f"scoreUpdates/{score_update_id}") - logger.info(f"Returning score update {score_update_id}.") + r = self.connection.get_request(f'scoreUpdates/{score_update_id}') + logger.info(f'Returning score update {score_update_id}.') return r def update_score_status(self, score_update_id, status): @@ -100,36 +99,26 @@ def update_score_status(self, score_update_id, status): ``None`` """ - if status not in ["pending approval", "approved", "disapproved", "canceled"]: + if status not in ['pending approval', 'approved', 'disapproved', + 'canceled']: - raise ValueError( - """Valid inputs for status are, 'pending approval', - 'approved','disapproved','canceled'""" - ) + raise ValueError("""Valid inputs for status are, 'pending approval', + 'approved','disapproved','canceled'""") else: - if status == "pending approval": - status = "PendingApproval" + if status == 'pending approval': + status = 'PendingApproval' else: status = status.capitalize() json = {"loadStatus": status} - r = self.connection.patch_request(f"scoreUpdates/{score_update_id}", json=json) - logger.info(f"Score {score_update_id} status updated to {status}.") + r = self.connection.patch_request(f'scoreUpdates/{score_update_id}', json=json) + logger.info(f'Score {score_update_id} status updated to {status}.') return r - def upload_scores( - self, - tbl, - config, - url_type, - id_type="vanid", - email=None, - auto_approve=True, - approve_tolerance=0.1, - **url_kwargs, - ): + def upload_scores(self, tbl, config, url_type, id_type='vanid', email=None, auto_approve=True, + approve_tolerance=.1, **url_kwargs): """ Upload scores. Use to create or overwrite scores. Multiple score loads should be configured in a single call. 
[1]_ @@ -182,76 +171,55 @@ def upload_scores( # Move to cloud storage file_name = str(uuid.uuid1()) - url = cloud_storage.post_file( - tbl, url_type, file_path=file_name + ".zip", **url_kwargs - ) - logger.info(f"Table uploaded to {url_type}.") + url = cloud_storage.post_file(tbl, url_type, file_path=file_name + '.zip', **url_kwargs) + logger.info(f'Table uploaded to {url_type}.') # Generate shell request - json = { - "description": "A description", - "file": { - "columnDelimiter": "csv", - "columns": [{"name": c} for c in tbl.columns], - "fileName": file_name + ".csv", - "hasHeader": "True", - "hasQuotes": "False", - "sourceUrl": url, - }, - "actions": [], - } + json = {"description": 'A description', + "file": { + "columnDelimiter": 'csv', + "columns": [{'name': c} for c in tbl.columns], + "fileName": file_name + '.csv', + "hasHeader": "True", + "hasQuotes": "False", + "sourceUrl": url}, + "actions": [] + } # Configure each score for i in config: - action = { - "actionType": "score", - "personIdColumn": tbl.columns[0], - "personIdType": id_type, - "scoreColumn": i["score_column"], - "scoreId": i["score_id"], - } + action = {"actionType": "score", + "personIdColumn": tbl.columns[0], + "personIdType": id_type, + "scoreColumn": i['score_column'], + "scoreId": i['score_id']} if auto_approve: - average = petl.stats(tbl.table, i["score_column"]).mean - action["approvalCriteria"] = { - "average": average, - "tolerance": approve_tolerance, - } + average = petl.stats(tbl.table, i['score_column']).mean + action['approvalCriteria'] = {"average": average, "tolerance": approve_tolerance} - json["actions"].append(action) + json['actions'].append(action) # Add email listener if email: - json["listeners"] = [{"type": "EMAIL", "value": email}] + json['listeners'] = [{"type": "EMAIL", 'value': email}] # Upload scores - r = self.connection.post_request("fileLoadingJobs", json=json) + r = self.connection.post_request('fileLoadingJobs', json=json) logger.info(f"Scores job {r['jobId']} created.") - return r["jobId"] + return r['jobId'] class FileLoadingJobs(object): + def __init__(self, van_connection): self.connection = van_connection - def create_file_load( - self, - file_name, - file_url, - columns, - id_column, - id_type, - score_id, - score_column, - delimiter="csv", - header=True, - quotes=True, - description=None, - email=None, - auto_average=None, - auto_tolerance=None, - ): + def create_file_load(self, file_name, file_url, columns, id_column, id_type, + score_id, score_column, delimiter='csv', header=True, quotes=True, + description=None, email=None, auto_average=None, + auto_tolerance=None): """ .. warning:: .. deprecated:: 0.7 Use :func:`parsons.VAN.upload_scores` instead. @@ -289,63 +257,48 @@ def create_file_load( The file load id """ - columns = [{"name": c} for c in columns] + columns = [{'name': c} for c in columns] # To Do: Validate that it is a .zip file. Not entirely sure if this is possible # as some urls might not end in ".zip". 
- if delimiter not in ["csv", "tab", "pipe"]: + if delimiter not in ['csv', 'tab', 'pipe']: raise ValueError("Delimiter must be one of 'csv', 'tab' or 'pipe'") delimiter = delimiter.capitalize() - json = { - "description": "A description", - "file": { - "columnDelimiter": delimiter, - "columns": columns, - "fileName": file_name, - "hasHeader": header, - "hasQuotes": quotes, - "sourceUrl": file_url, - }, - "actions": [ - { - "actionType": "score", - "personIdColumn": id_column, - "personIdType": id_type, - "scoreColumn": score_column, - "scoreId": score_id, + json = {"description": 'A description', + "file": { + "columnDelimiter": delimiter, + "columns": columns, + "fileName": file_name, + "hasHeader": header, + "hasQuotes": quotes, + "sourceUrl": file_url + }, + "actions": [ + {"actionType": "score", + "personIdColumn": id_column, + "personIdType": id_type, + "scoreColumn": score_column, + "scoreId": score_id}], + "listeners": [ + {"type": "EMAIL", + "value": email}] } - ], - "listeners": [{"type": "EMAIL", "value": email}], - } if auto_average and auto_tolerance: - json["actions"]["approvalCriteria"] = { - "average": auto_average, - "tolerance": auto_tolerance, - } + json["actions"]["approvalCriteria"] = {"average": auto_average, + "tolerance": auto_tolerance} - r = self.connection.post_request("fileLoadingJobs", json=json)["jobId"] - logger.info(f"Score loading job {r} created.") + r = self.connection.post_request('fileLoadingJobs', json=json)['jobId'] + logger.info(f'Score loading job {r} created.') return r - def create_file_load_multi( - self, - file_name, - file_url, - columns, - id_column, - id_type, - score_map, - delimiter="csv", - header=True, - quotes=True, - description=None, - email=None, - ): + def create_file_load_multi(self, file_name, file_url, columns, id_column, id_type, + score_map, delimiter='csv', header=True, quotes=True, + description=None, email=None): """ .. warning:: .. deprecated:: 0.7 Use :func:`parsons.VAN.upload_scores` instead. @@ -381,49 +334,49 @@ def create_file_load_multi( The file load job id """ - columns = [{"name": c} for c in columns] + columns = [{'name': c} for c in columns] # To Do: Validate that it is a .zip file. Not entirely sure if this is possible # as some urls might not end in ".zip". 
- if delimiter not in ["csv", "tab", "pipe"]: + if delimiter not in ['csv', 'tab', 'pipe']: raise ValueError("Delimiter must be one of 'csv', 'tab' or 'pipe'") delimiter = delimiter.capitalize() - json = { - "description": "A description", - "file": { - "columnDelimiter": delimiter, - "columns": columns, - "fileName": file_name, - "hasHeader": header, - "hasQuotes": quotes, - "sourceUrl": file_url, - }, - "listeners": [{"type": "EMAIL", "value": email}], - } + json = {"description": 'A description', + "file": { + "columnDelimiter": delimiter, + "columns": columns, + "fileName": file_name, + "hasHeader": header, + "hasQuotes": quotes, + "sourceUrl": file_url + }, + "listeners": [ + {"type": "EMAIL", + "value": email}] + } actions = [] for score in score_map: - action = { - "actionType": "score", - "personIdColumn": id_column, - "personIdType": id_type, - "scoreColumn": score["score_column"], - "scoreId": score["score_id"], - "approvalCriteria": { - "average": score["auto_average"], - "tolerance": score["auto_tolerance"], - }, - } + action = {"actionType": "score", + "personIdColumn": id_column, + "personIdType": id_type, + "scoreColumn": score['score_column'], + "scoreId": score['score_id'], + "approvalCriteria": { + "average": score['auto_average'], + "tolerance": score['auto_tolerance'] + } + } actions.append(action) - json["actions"] = actions + json['actions'] = actions - r = self.connection.post_request("fileLoadingJobs", json=json)["jobId"] - logger.info(f"Score loading job {r} created.") + r = self.connection.post_request('fileLoadingJobs', json=json)['jobId'] + logger.info(f'Score loading job {r} created.') return r diff --git a/parsons/ngpvan/signups.py b/parsons/ngpvan/signups.py index b5487551cb..9fc439126e 100644 --- a/parsons/ngpvan/signups.py +++ b/parsons/ngpvan/signups.py @@ -6,6 +6,7 @@ class Signups(object): + def __init__(self, van_connection): self.connection = van_connection @@ -27,18 +28,18 @@ def get_signups_statuses(self, event_id=None, event_type_id=None): """ if event_id is None and event_type_id is None: - raise ValueError("One of event_id or event_type_id must be populated") + raise ValueError('One of event_id or event_type_id must be populated') if event_id is not None and event_type_id is not None: - raise ValueError("Event Id and Event Type ID may not BOTH be populated") + raise ValueError('Event Id and Event Type ID may not BOTH be populated') if event_id: - params = {"eventId": event_id} + params = {'eventId': event_id} if event_type_id: - params = {"eventTypeId": event_type_id} + params = {'eventTypeId': event_type_id} - tbl = Table(self.connection.get_request("signups/statuses", params=params)) - logger.info(f"Found {tbl.num_rows} signups.") + tbl = Table(self.connection.get_request('signups/statuses', params=params)) + logger.info(f'Found {tbl.num_rows} signups.') return tbl def get_person_signups(self, vanid): @@ -53,8 +54,8 @@ def get_person_signups(self, vanid): See :ref:`parsons-table` for output options. """ - tbl = Table(self.connection.get_request("signups", params={"vanID": vanid})) - logger.info(f"Found {tbl.num_rows} signups for {vanid}.") + tbl = Table(self.connection.get_request('signups', params={'vanID': vanid})) + logger.info(f'Found {tbl.num_rows} signups for {vanid}.') return self._unpack_signups(tbl) def get_event_signups(self, event_id): @@ -69,10 +70,8 @@ def get_event_signups(self, event_id): See :ref:`parsons-table` for output options. 
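A brief sketch of the signup lookups above; per the validation in get_signups_statuses, exactly one of event_id or event_type_id may be passed. The ids are placeholders.

from parsons import VAN

van = VAN(db="EveryAction")

statuses = van.get_signups_statuses(event_id=123)   # or event_type_id=..., not both
signups = van.get_event_signups(event_id=123)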
""" - tbl = Table( - self.connection.get_request("signups", params={"eventId": event_id}) - ) - logger.info(f"Found {tbl.num_rows} signups for event {event_id}.") + tbl = Table(self.connection.get_request('signups', params={'eventId': event_id})) + logger.info(f'Found {tbl.num_rows} signups for event {event_id}.') return self._unpack_signups(tbl) def get_signup(self, event_signup_id): @@ -87,8 +86,8 @@ def get_signup(self, event_signup_id): See :ref:`parsons-table` for output options. """ - r = self.connection.get_request(f"signups/{event_signup_id}") - logger.info(f"Found sign up {event_signup_id}.") + r = self.connection.get_request(f'signups/{event_signup_id}') + logger.info(f'Found sign up {event_signup_id}.') return r def create_signup(self, vanid, event_id, shift_id, role_id, status_id, location_id): @@ -113,27 +112,20 @@ def create_signup(self, vanid, event_id, shift_id, role_id, status_id, location_ The event signup id """ - signup = { - "person": {"vanId": vanid}, - "event": {"eventId": event_id}, - "shift": {"eventShiftId": shift_id}, - "role": {"roleId": role_id}, - "status": {"statusId": status_id}, - "location": {"locationId": location_id}, - } - - r = self.connection.post_request("signups", json=signup) - logger.info(f"Signup {r} created.") + signup = {'person': {'vanId': vanid}, + 'event': {'eventId': event_id}, + 'shift': {'eventShiftId': shift_id}, + 'role': {'roleId': role_id}, + 'status': {'statusId': status_id}, + 'location': {'locationId': location_id} + } + + r = self.connection.post_request('signups', json=signup) + logger.info(f'Signup {r} created.') return r - def update_signup( - self, - event_signup_id, - shift_id=None, - role_id=None, - status_id=None, - location_id=None, - ): + def update_signup(self, event_signup_id, shift_id=None, role_id=None, status_id=None, + location_id=None): """ Update a signup object. All of the kwargs will update the values associated with them. 
@@ -154,19 +146,19 @@ def update_signup( """ # Get the signup object - signup = self.connection.get_request(f"signups/{event_signup_id}") + signup = self.connection.get_request(f'signups/{event_signup_id}') # Update the signup object if shift_id: - signup["shift"] = {"eventShiftId": shift_id} + signup['shift'] = {'eventShiftId': shift_id} if role_id: - signup["role"] = {"roleId": role_id} + signup['role'] = {'roleId': role_id} if status_id: - signup["status"] = {"statusId": status_id} + signup['status'] = {'statusId': status_id} if location_id: - signup["location"] = {"locationId": location_id} + signup['location'] = {'locationId': location_id} - return self.connection.put_request(f"signups/{event_signup_id}", json=signup) + return self.connection.put_request(f'signups/{event_signup_id}', json=signup) def delete_signup(self, event_signup_id): """ @@ -179,18 +171,18 @@ def delete_signup(self, event_signup_id): ``None`` """ - r = self.connection.delete_request(f"signups/{event_signup_id}") - logger.info(f"Signup {event_signup_id} deleted.") + r = self.connection.delete_request(f'signups/{event_signup_id}') + logger.info(f'Signup {event_signup_id} deleted.') return r def _unpack_signups(self, table): # Unpack all of the nested jsons - table.unpack_dict("person", prepend=False) - table.unpack_dict("status") - table.unpack_dict("event") - table.unpack_dict("shift") - table.unpack_dict("role") - table.unpack_dict("location") + table.unpack_dict('person', prepend=False) + table.unpack_dict('status') + table.unpack_dict('event') + table.unpack_dict('shift') + table.unpack_dict('role') + table.unpack_dict('location') return table diff --git a/parsons/ngpvan/supporter_groups.py b/parsons/ngpvan/supporter_groups.py index d50ba0c013..64c2d90257 100644 --- a/parsons/ngpvan/supporter_groups.py +++ b/parsons/ngpvan/supporter_groups.py @@ -6,6 +6,7 @@ class SupporterGroups(object): + def __init__(self, van_connection): self.connection = van_connection @@ -19,8 +20,8 @@ def get_supporter_groups(self): See :ref:`parsons-table` for output options. 
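A compact sketch of the supporter-group round trip (the create/add/list methods appear just below); the group name and vanid are placeholders, and the "supporterGroupId" response key is an assumption about the API payload.

from parsons import VAN

van = VAN(db="EveryAction")

group = van.create_supporter_group("Volunteers 2024", "Created via Parsons")
van.add_person_supporter_group(group["supporterGroupId"], vanid=12345)  # key assumed
groups = van.get_supporter_groups()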
""" - tbl = Table(self.connection.get_request("supporterGroups")) - logger.info(f"Found {tbl.num_rows} supporter groups.") + tbl = Table(self.connection.get_request('supporterGroups')) + logger.info(f'Found {tbl.num_rows} supporter groups.') return tbl def get_supporter_group(self, supporter_group_id): @@ -34,8 +35,8 @@ def get_supporter_group(self, supporter_group_id): dict """ - r = self.connection.get_request(f"supporterGroups/{supporter_group_id}") - logger.info(f"Found supporter group {supporter_group_id}.") + r = self.connection.get_request(f'supporterGroups/{supporter_group_id}') + logger.info(f'Found supporter group {supporter_group_id}.') return r def create_supporter_group(self, name, description): @@ -52,8 +53,8 @@ def create_supporter_group(self, name, description): and description """ - json = {"name": name, "description": description} - r = self.connection.post_request("supporterGroups", json=json) + json = {'name': name, 'description': description} + r = self.connection.post_request('supporterGroups', json=json) return r def delete_supporter_group(self, supporter_group_id): @@ -67,8 +68,8 @@ def delete_supporter_group(self, supporter_group_id): ``None`` """ - r = self.connection.delete_request(f"supporterGroups/{supporter_group_id}") - logger.info(f"Deleted supporter group {supporter_group_id}.") + r = self.connection.delete_request(f'supporterGroups/{supporter_group_id}') + logger.info(f'Deleted supporter group {supporter_group_id}.') return r def add_person_supporter_group(self, supporter_group_id, vanid): @@ -84,10 +85,8 @@ def add_person_supporter_group(self, supporter_group_id, vanid): ``None`` """ - r = self.connection.put_request( - f"supporterGroups/{supporter_group_id}/people/{vanid}" - ) - logger.info(f"Added person {vanid} to {supporter_group_id} supporter group.") + r = self.connection.put_request(f'supporterGroups/{supporter_group_id}/people/{vanid}') + logger.info(f'Added person {vanid} to {supporter_group_id} supporter group.') return r def delete_person_supporter_group(self, supporter_group_id, vanid): @@ -103,10 +102,6 @@ def delete_person_supporter_group(self, supporter_group_id, vanid): ``None`` """ - r = self.connection.delete_request( - f"supporterGroups/{supporter_group_id}/people/{vanid}" - ) - logger.info( - f"Deleted person {vanid} from {supporter_group_id} supporter group." - ) + r = self.connection.delete_request(f'supporterGroups/{supporter_group_id}/people/{vanid}') + logger.info(f'Deleted person {vanid} from {supporter_group_id} supporter group.') return r diff --git a/parsons/ngpvan/survey_questions.py b/parsons/ngpvan/survey_questions.py index a00bc25fe1..2563cfdbaa 100644 --- a/parsons/ngpvan/survey_questions.py +++ b/parsons/ngpvan/survey_questions.py @@ -6,13 +6,13 @@ class SurveyQuestions(object): + def __init__(self, van_connection): self.connection = van_connection - def get_survey_questions( - self, statuses=["Active"], name=None, sq_type=None, question=None, cycle=None - ): + def get_survey_questions(self, statuses=['Active'], name=None, sq_type=None, question=None, + cycle=None): """ Get survey questions. @@ -33,17 +33,15 @@ def get_survey_questions( See :ref:`parsons-table` for output options. 
""" - params = { - "statuses": statuses, - "$top": self.page_size, - "name": name, - "type": sq_type, - "question": question, - "cycle": cycle, - } - - tbl = Table(self.connection.get_request("surveyQuestions", params=params)) - logger.info(f"Found {tbl.num_rows} survey questions.") + params = {'statuses': statuses, + '$top': self.page_size, + 'name': name, + 'type': sq_type, + 'question': question, + 'cycle': cycle} + + tbl = Table(self.connection.get_request('surveyQuestions', params=params)) + logger.info(f'Found {tbl.num_rows} survey questions.') return tbl def get_survey_question(self, survey_question_id): @@ -58,21 +56,13 @@ def get_survey_question(self, survey_question_id): See :ref:`parsons-table` for output options. """ - r = self.connection.get_request(f"surveyQuestions/{survey_question_id}") - logger.info(f"Found survey question {survey_question_id}.") + r = self.connection.get_request(f'surveyQuestions/{survey_question_id}') + logger.info(f'Found survey question {survey_question_id}.') return r - def apply_survey_response( - self, - id, - survey_question_id, - survey_response_id, - id_type="vanid", - result_code_id=None, - contact_type_id=None, - input_type_id=None, - date_canvassed=None, - ): + def apply_survey_response(self, id, survey_question_id, survey_response_id, + id_type='vanid', result_code_id=None, contact_type_id=None, + input_type_id=None, date_canvassed=None): """ Apply a single survey response to a person. @@ -99,19 +89,11 @@ def apply_survey_response( `Optional`; ISO 8601 formatted date. Defaults to todays date """ - response = { - "surveyQuestionId": survey_question_id, - "surveyResponseId": survey_response_id, - "type": "surveyResponse", - } - - logger.info(f"Applying survey question {survey_question_id} to {id_type} {id}") - self.apply_response( - id, - response, - id_type, - result_code_id=result_code_id, - contact_type_id=contact_type_id, - input_type_id=input_type_id, - date_canvassed=date_canvassed, - ) + response = {"surveyQuestionId": survey_question_id, + "surveyResponseId": survey_response_id, + "type": "surveyResponse"} + + logger.info(f'Applying survey question {survey_question_id} to {id_type} {id}') + self.apply_response(id, response, id_type, result_code_id=result_code_id, + contact_type_id=contact_type_id, input_type_id=input_type_id, + date_canvassed=date_canvassed) diff --git a/parsons/ngpvan/targets.py b/parsons/ngpvan/targets.py index ee6490e7bf..cb06ce674c 100644 --- a/parsons/ngpvan/targets.py +++ b/parsons/ngpvan/targets.py @@ -2,7 +2,8 @@ from parsons.etl.table import Table import logging -import petl +import json +import requests logger = logging.getLogger(__name__) @@ -12,6 +13,7 @@ class TargetsFailed(Exception): class Targets(object): + def __init__(self, van_connection): self.connection = van_connection @@ -28,8 +30,8 @@ def get_targets(self): See :ref:`parsons-table` for output options. 
""" - tbl = Table(self.connection.get_request("targets")) - logger.info(f"Found {tbl.num_rows} targets.") + tbl = Table(self.connection.get_request('targets')) + logger.info(f'Found {tbl.num_rows} targets.') return tbl def get_target(self, target_id): @@ -44,8 +46,8 @@ def get_target(self, target_id): The target """ - r = self.connection.get_request(f"targets/{target_id}") - logger.info(f"Found target {target_id}.") + r = self.connection.get_request(f'targets/{target_id}') + logger.info(f'Found target {target_id}.') return r def get_target_export(self, export_job_id): @@ -57,17 +59,20 @@ def get_target_export(self, export_job_id): See :ref:`parsons-table` for output options. """ - response = self.connection.get_request(f"targetExportJobs/{export_job_id}") - job_status = response.get("jobStatus") - if job_status == "Complete": - url = response["file"]["downloadUrl"] - return Table(petl.fromcsv(url, encoding="utf-8-sig")) - elif job_status == "Pending" or job_status == "InProcess": - logger.info( - f"Target export job is pending or in process for {export_job_id}." - ) + response = self.connection.get_request(f'targetExportJobs/{export_job_id}') + json_string = json.dumps(response) + json_obj = json.loads(json_string) + for i in json_obj: + job_status = i['jobStatus'] + if job_status == 'Complete': + for j in json_obj: + csv = j['file']['downloadUrl'] + response_csv = requests.get(csv) + return Table.from_csv_string(response_csv.text) + elif job_status == 'Pending' or job_status == 'InProcess': + logger.info(f'Target export job is pending or in process for {export_job_id}.') else: - raise TargetsFailed(f"Target export failed for {export_job_id}") + raise TargetsFailed(f'Target export failed for {export_job_id}') def create_target_export(self, target_id, webhook_url=None): """ @@ -80,8 +85,10 @@ def create_target_export(self, target_id, webhook_url=None): dict The target export job ID """ - target_export = {"targetId": target_id} + target_export = { + 'targetId': target_id + } - r = self.connection.post_request("targetExportJobs", json=target_export) - logger.info(f"Created new target export job for {target_id}.") + r = self.connection.post_request('targetExportJobs', json=target_export) + logger.info(f'Created new target export job for {target_id}.') return r diff --git a/parsons/ngpvan/utilities.py b/parsons/ngpvan/utilities.py index 1f31f5e989..7abd662f2a 100644 --- a/parsons/ngpvan/utilities.py +++ b/parsons/ngpvan/utilities.py @@ -1,3 +1,5 @@ + + def action_parse(action): """ Internal method to parse and validate actions, which are required for some methods @@ -6,7 +8,7 @@ def action_parse(action): action = action.capitalize() - if action not in ("Apply", "Remove"): + if action not in ('Apply', 'Remove'): raise ValueError("Action must be either 'Apply' or 'Remove'") @@ -19,6 +21,6 @@ def list_to_string(string_arg): """ if string_arg: - return ".".join(string_arg) + return '.'.join(string_arg) else: return string_arg diff --git a/parsons/ngpvan/van.py b/parsons/ngpvan/van.py index b019e391f5..6d0f315856 100644 --- a/parsons/ngpvan/van.py +++ b/parsons/ngpvan/van.py @@ -13,36 +13,15 @@ from parsons.ngpvan.locations import Locations from parsons.ngpvan.bulk_import import BulkImport from parsons.ngpvan.changed_entities import ChangedEntities -from parsons.ngpvan.contact_notes import ContactNotes from parsons.ngpvan.custom_fields import CustomFields from parsons.ngpvan.targets import Targets -from parsons.ngpvan.printed_lists import PrintedLists logger = logging.getLogger(__name__) -class VAN( 
- People, - Events, - SavedLists, - PrintedLists, - Folders, - ExportJobs, - ActivistCodes, - CanvassResponses, - SurveyQuestions, - Codes, - Scores, - FileLoadingJobs, - SupporterGroups, - Signups, - Locations, - BulkImport, - ChangedEntities, - ContactNotes, - CustomFields, - Targets, -): +class VAN(People, Events, SavedLists, Folders, ExportJobs, ActivistCodes, CanvassResponses, + SurveyQuestions, Codes, Scores, FileLoadingJobs, SupporterGroups, Signups, Locations, + BulkImport, ChangedEntities, CustomFields, Targets): """ Returns the VAN class @@ -61,9 +40,7 @@ class VAN( VAN object """ - def __init__( - self, api_key=None, auth_name="default", db=None, raise_for_status=True - ): + def __init__(self, api_key=None, auth_name='default', db=None, raise_for_status=True): self.connection = VANConnector(api_key=api_key, db=db) self.api_key = api_key diff --git a/parsons/ngpvan/van_connector.py b/parsons/ngpvan/van_connector.py index 77160615ac..4c8f3f937b 100644 --- a/parsons/ngpvan/van_connector.py +++ b/parsons/ngpvan/van_connector.py @@ -5,36 +5,31 @@ logger = logging.getLogger(__name__) -URI = "https://api.securevan.com/v4/" -SOAP_URI = "https://api.securevan.com/Services/V3/ListService.asmx?WSDL" +URI = 'https://api.securevan.com/v4/' +SOAP_URI = 'https://api.securevan.com/Services/V3/ListService.asmx?WSDL' class VANConnector(object): - def __init__(self, api_key=None, auth_name="default", db=None): - self.api_key = check_env.check("VAN_API_KEY", api_key) + def __init__(self, api_key=None, auth_name='default', db=None): - if db == "MyVoters": + self.api_key = check_env.check('VAN_API_KEY', api_key) + + if db == 'MyVoters': self.db_code = 0 - elif db in ["MyMembers", "MyCampaign", "EveryAction"]: + elif db in ['MyMembers', 'MyCampaign', 'EveryAction']: self.db_code = 1 else: - raise KeyError( - "Invalid database type specified. Pick one of:" - " MyVoters, MyCampaign, MyMembers, EveryAction." - ) + raise KeyError('Invalid database type specified. Pick one of:' + ' MyVoters, MyCampaign, MyMembers, EveryAction.') self.uri = URI self.db = db self.auth_name = auth_name - self.pagination_key = "nextPageLink" - self.auth = (self.auth_name, self.api_key + "|" + str(self.db_code)) - self.api = APIConnector( - self.uri, - auth=self.auth, - data_key="items", - pagination_key=self.pagination_key, - ) + self.pagination_key = 'nextPageLink' + self.auth = (self.auth_name, self.api_key + '|' + str(self.db_code)) + self.api = APIConnector(self.uri, auth=self.auth, data_key='items', + pagination_key=self.pagination_key) # We will not create the SOAP client unless we need to as this triggers checking for # valid credentials. As not all API keys are provisioned for SOAP, this keeps it from @@ -47,7 +42,7 @@ def api_key_profile(self): Returns the API key profile which includes permissions and other metadata.
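A sketch of how the connector above assembles its credentials: the database name maps to a numeric code that is appended to the API key with a pipe, so a MyVoters key authenticates as "<key>|0" and the other databases as "<key>|1". The key below is a placeholder.

from parsons import VAN

van = VAN(api_key="abc123-placeholder", db="MyVoters")
print(van.connection.auth)   # ('default', 'abc123-placeholder|0')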
""" - return self.get_request("apiKeyProfiles")[0] + return self.get_request('apiKeyProfiles')[0] @property def soap_client(self): @@ -55,12 +50,7 @@ def soap_client(self): if not self._soap_client: # Create the SOAP client - soap_auth = { - "Header": { - "DatabaseMode": self.soap_client_db(), - "APIKey": self.api_key, - } - } + soap_auth = {'Header': {'DatabaseMode': self.soap_client_db(), 'APIKey': self.api_key}} self._soap_client = Client(SOAP_URI, soapheaders=soap_auth) return self._soap_client @@ -70,10 +60,10 @@ def soap_client_db(self): Parse the REST database name to the accepted SOAP format """ - if self.db == "MyVoters": - return "MyVoterFile" - if self.db == "EveryAction": - return "MyCampaign" + if self.db == 'MyVoters': + return 'MyVoterFile' + if self.db == 'EveryAction': + return 'MyCampaign' else: return self.db @@ -84,12 +74,11 @@ def get_request(self, endpoint, **kwargs): # Paginate while isinstance(r, dict) and self.api.next_page_check_url(r): - if endpoint == "savedLists" and not r["items"]: - break - if endpoint == "printedLists" and not r["items"]: + if endpoint == 'savedLists' and not r['items']: break r = self.api.get_request(r[self.pagination_key], **kwargs) data.extend(self.api.data_parse(r)) + return data def post_request(self, endpoint, **kwargs): diff --git a/parsons/notifications/gmail.py b/parsons/notifications/gmail.py index 29ceafdde2..03233ec1be 100644 --- a/parsons/notifications/gmail.py +++ b/parsons/notifications/gmail.py @@ -5,7 +5,7 @@ from oauth2client import file, client, tools from parsons.notifications.sendmail import SendMail -SCOPES = "https://www.googleapis.com/auth/gmail.send" +SCOPES = 'https://www.googleapis.com/auth/gmail.send' class Gmail(SendMail): @@ -21,7 +21,7 @@ class Gmail(SendMail): "me" which is used to indicate the authenticated user. """ - def __init__(self, creds_path=None, token_path=None, user_id="me"): + def __init__(self, creds_path=None, token_path=None, user_id='me'): self.user_id = user_id @@ -44,13 +44,13 @@ def __init__(self, creds_path=None, token_path=None, user_id="me"): # BUG-1 # self.creds = self.run_flow(flow, self.store, http=http) - self.service = build("gmail", "v1", http=self.creds.authorize(Http())) + self.service = build('gmail', 'v1', http=self.creds.authorize(Http())) # BUG-1 # self.service = build('gmail', 'v1', http=self.creds.authorize(http)) def _encode_raw_message(self, message): - return {"raw": base64.urlsafe_b64encode(message.as_bytes()).decode()} + return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()} def _send_message(self, msg): """Send an email message. 
@@ -71,17 +71,15 @@ def _send_message(self, msg): self.log.debug(message) try: - message = ( - self.service.users() - .messages() - .send(userId=self.user_id, body=message) - .execute() - ) + message = (self.service.users().messages() + .send(userId=self.user_id, body=message).execute()) except errors.HttpError: - self.log.exception("An error occurred: while attempting to send a message.") + self.log.exception( + 'An error occurred: while attempting to send a message.') raise else: self.log.debug(message) - self.log.info(f"Message sent successfully (Message Id: {message['id']})") + self.log.info( + f"Message sent successfully (Message Id: {message['id']})") return message diff --git a/parsons/notifications/sendmail.py b/parsons/notifications/sendmail.py index bf7873454d..11199e20b2 100644 --- a/parsons/notifications/sendmail.py +++ b/parsons/notifications/sendmail.py @@ -1,5 +1,4 @@ # Adapted from Gmail API tutorial https://developers.google.com/gmail/api -from abc import ABC, abstractmethod from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.mime.image import MIMEImage @@ -25,30 +24,14 @@ logger = logging.getLogger(__name__) -class SendMail(ABC): +class SendMail(object): """SendMail base class for sending emails. - - This class is not designed to be used directly, - as it has useful methods for composing messages and validating emails - but does not contain all the required functionality in order - to send a message. Rather it should be subclassed for each different type of - email service, and those subclasses should define an __init__ - method (to set any instance attributes such as credentials) and a _send_message - method (to implement the actual sending of the message). - - For an example of this subclassing in practice, look at the Gmail notification - connector in parsons.notifications.gmail. """ log = logger - @abstractmethod - def __init__(self, *args, **kwargs): - pass - - @abstractmethod def _send_message(self, message): - pass + raise NotImplementedError("send_message is how to send the prepared message") def _create_message_simple(self, sender, to, subject, message_text): """Create a text-only message for an email. @@ -68,13 +51,14 @@ def _create_message_simple(self, sender, to, subject, message_text): self.log.info("Creating a simple message...") message = MIMEText(message_text) - message["to"] = to - message["from"] = sender - message["subject"] = subject + message['to'] = to + message['from'] = sender + message['subject'] = subject return message - def _create_message_html(self, sender, to, subject, message_text, message_html): + def _create_message_html(self, sender, to, subject, message_text, + message_html): """Create an html message for an email.
`Args:` @@ -93,19 +77,18 @@ def _create_message_html(self, sender, to, subject, message_text, message_html): """ self.log.info("Creating an html message...") - message = MIMEMultipart("alternative") - message["subject"] = subject - message["from"] = sender - message["to"] = to + message = MIMEMultipart('alternative') + message['subject'] = subject + message['from'] = sender + message['to'] = to if message_text: - message.attach(MIMEText(message_text, "plain")) - message.attach(MIMEText(message_html, "html")) + message.attach(MIMEText(message_text, 'plain')) + message.attach(MIMEText(message_html, 'html')) return message - def _create_message_attachments( - self, sender, to, subject, message_text, files, message_html=None - ): + def _create_message_attachments(self, sender, to, subject, message_text, + files, message_html=None): """Create a message for an email that includes an attachment. `Args:` @@ -126,21 +109,21 @@ def _create_message_attachments( """ self.log.info("Creating a message with attachments...") - message = MIMEMultipart("alternative") - message["to"] = to - message["from"] = sender - message["subject"] = subject + message = MIMEMultipart('alternative') + message['to'] = to + message['from'] = sender + message['subject'] = subject - msg = MIMEText(message_text, "plain") + msg = MIMEText(message_text, 'plain') message.attach(msg) if message_html: - html = MIMEText(message_html, "html") + html = MIMEText(message_html, 'html') message.attach(html) for f in files: - filename = getattr(f, "name", "file") - file_bytes = b"" + filename = getattr(f, 'name', 'file') + file_bytes = b'' if isinstance(f, io.StringIO): file_bytes = f.getvalue().encode() @@ -148,34 +131,34 @@ def _create_message_attachments( file_bytes = f.getvalue() else: filename = os.path.basename(f) - fp = open(f, "rb") + fp = open(f, 'rb') file_bytes = fp.read() fp.close() content_type, encoding = mimetypes.guess_type(filename) self.log.debug( - f"(File: {f}, Content-type: {content_type}, " f"Encoding: {encoding})" - ) + f"(File: {f}, Content-type: {content_type}, " + f"Encoding: {encoding})") if content_type is None or encoding is not None: - content_type = "application/octet-stream" + content_type = 'application/octet-stream' - main_type, sub_type = content_type.split("/", 1) + main_type, sub_type = content_type.split('/', 1) - if main_type == "text": + if main_type == 'text': self.log.info("Added a text file.") - msg = MIMEText(file_bytes, _subtype=sub_type, _charset="utf-8") + msg = MIMEText(file_bytes, _subtype=sub_type, _charset='utf-8') - elif main_type == "image": + elif main_type == 'image': self.log.info("Added an image file.") msg = MIMEImage(file_bytes, _subtype=sub_type) - msg.add_header("Content-ID", f"<{filename}>") + msg.add_header('Content-ID', f'<{filename}>') - elif main_type == "audio": + elif main_type == 'audio': self.log.info("Added an audio file.") msg = MIMEAudio(file_bytes, _subtype=sub_type) - elif main_type == "application": + elif main_type == 'application': self.log.info("Added an application file.") msg = MIMEApplication(file_bytes, _subtype=sub_type) @@ -185,7 +168,8 @@ def _create_message_attachments( msg.set_payload(file_bytes) encode_base64(msg) - msg.add_header("Content-Disposition", "attachment", filename=filename) + msg.add_header( + 'Content-Disposition', 'attachment', filename=filename) message.attach(msg) return message @@ -202,9 +186,8 @@ def _validate_email_string(self, str): return True - def send_email( - self, sender, to, subject, message_text, message_html=None, 
files=None - ): + def send_email(self, sender, to, subject, message_text, message_html=None, + files=None): """Send an email message. `Args:` @@ -227,7 +210,7 @@ def send_email( `Returns:` None """ - self.log.info("Preparing to send an email...") + self.log.info("Preparing to send and email...") self.log.info("Validating email(s)") if isinstance(to, list): @@ -237,28 +220,26 @@ def send_email( for e in to: self._validate_email_string(e) - to = ", ".join(to) + to = ', '.join(to) elif isinstance(to, str): self._validate_email_string(to) if not message_html and not files: - msg_type = "simple" + msg_type = 'simple' msg = self._create_message_simple(sender, to, subject, message_text) elif not files: - msg_type = "html" + msg_type = 'html' msg = self._create_message_html( - sender, to, subject, message_text, message_html - ) + sender, to, subject, message_text, message_html) else: - msg_type = "attachments" + msg_type = 'attachments' if isinstance(files, str): files = [files] msg = self._create_message_attachments( - sender, to, subject, message_text, files, message_html - ) + sender, to, subject, message_text, files, message_html) self.log.info(f"Sending a(n) {msg_type} email...") @@ -269,5 +250,4 @@ def send_email( class EmptyListError(IndexError): """Throw when a list is empty that should contain at least 1 element.""" - pass diff --git a/parsons/notifications/slack.py b/parsons/notifications/slack.py index 5b36aead45..7c9271d259 100644 --- a/parsons/notifications/slack.py +++ b/parsons/notifications/slack.py @@ -11,6 +11,7 @@ class Slack(object): + def __init__(self, api_key=None): if api_key is None: @@ -19,10 +20,8 @@ def __init__(self, api_key=None): self.api_key = os.environ["SLACK_API_TOKEN"] except KeyError: - raise KeyError( - "Missing api_key. It must be passed as an " - "argument or stored as environmental variable" - ) + raise KeyError('Missing api_key. It must be passed as an ' + 'argument or stored as environmental variable') else: @@ -30,9 +29,8 @@ def __init__(self, api_key=None): self.client = SlackClient(self.api_key) - def channels( - self, fields=["id", "name"], exclude_archived=False, types=["public_channel"] - ): + def channels(self, fields=['id', 'name'], exclude_archived=False, + types=['public_channel']): """ Return a list of all channels in a Slack team. @@ -54,34 +52,21 @@ def channels( See :ref:`parsons-table` for output options. """ tbl = self._paginate_request( - "conversations.list", - "channels", - types=types, - exclude_archived=exclude_archived, - ) - - tbl.unpack_dict( - "topic", include_original=False, prepend=True, prepend_value="topic" - ) - tbl.unpack_dict( - "purpose", include_original=False, prepend=True, prepend_value="purpose" - ) + "conversations.list", "channels", types=types, + exclude_archived=exclude_archived) + + tbl.unpack_dict("topic", include_original=False, prepend=True, + prepend_value="topic") + tbl.unpack_dict("purpose", include_original=False, + prepend=True, prepend_value="purpose") rm_cols = [x for x in tbl.columns if x not in fields] tbl.remove_column(*rm_cols) return tbl - def users( - self, - fields=[ - "id", - "name", - "deleted", - "profile_real_name_normalized", - "profile_email", - ], - ): + def users(self, fields=['id', 'name', 'deleted', 'profile_real_name_normalized', + 'profile_email']): """ Return a list of all users in a Slack team. 
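A minimal sketch of the Slack client above, assuming a bot token in the SLACK_API_TOKEN environment variable; the channel is a placeholder.

from parsons import Slack

slack = Slack()

channels = slack.channels(fields=["id", "name"])
slack.message_channel("#general", "Pipeline finished.")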
@@ -98,9 +83,8 @@ def users( tbl = self._paginate_request("users.list", "members", include_locale=True) - tbl.unpack_dict( - "profile", include_original=False, prepend=True, prepend_value="profile" - ) + tbl.unpack_dict("profile", include_original=False, prepend=True, + prepend_value="profile") rm_cols = [x for x in tbl.columns if x not in fields] tbl.remove_column(*rm_cols) @@ -124,10 +108,10 @@ def message(cls, channel, text, webhook=None, parent_message_id=None): parent_message_id: str The `ts` value of the parent message. If used, this will thread the message. """ - webhook = check("SLACK_API_WEBHOOK", webhook, optional=True) - payload = {"channel": channel, "text": text} + webhook = check('SLACK_API_WEBHOOK', webhook, optional=True) + payload = {'channel': channel, 'text': text} if parent_message_id: - payload["thread_ts"] = parent_message_id + payload['thread_ts'] = parent_message_id return requests.post(webhook, json=payload) def message_channel(self, channel, text, as_user=False, parent_message_id=None): @@ -152,35 +136,24 @@ def message_channel(self, channel, text, as_user=False, parent_message_id=None): A response json """ resp = self.client.api_call( - "chat.postMessage", - channel=channel, - text=text, - as_user=as_user, - thread_ts=parent_message_id, - ) + "chat.postMessage", channel=channel, text=text, + as_user=as_user, thread_ts=parent_message_id) - if not resp["ok"]: + if not resp['ok']: - if resp["error"] == "ratelimited": - time.sleep(int(resp["headers"]["Retry-After"])) + if resp['error'] == 'ratelimited': + time.sleep(int(resp['headers']['Retry-After'])) resp = self.client.api_call( - "chat.postMessage", channel=channel, text=text, as_user=as_user - ) + "chat.postMessage", + channel=channel, text=text, as_user=as_user) - raise SlackClientError(resp["error"]) + raise SlackClientError(resp['error']) return resp - def upload_file( - self, - channels, - filename, - filetype=None, - initial_comment=None, - title=None, - is_binary=False, - ): + def upload_file(self, channels, filename, filetype=None, + initial_comment=None, title=None, is_binary=False): """ Upload a file to Slack channel(s). @@ -206,35 +179,27 @@ def upload_file( `dict`: A response json """ - if filetype is None and "." in filename: - filetype = filename.split(".")[-1] + if filetype is None and '.' 
in filename: + filetype = filename.split('.')[-1] - mode = "rb" if is_binary else "r" + mode = 'rb' if is_binary else 'r' with open(filename, mode) as file_content: resp = self.client.api_call( - "files.upload", - channels=channels, - file=file_content, - filetype=filetype, - initial_comment=initial_comment, - title=title, - ) + "files.upload", channels=channels, file=file_content, + filetype=filetype, initial_comment=initial_comment, + title=title) - if not resp["ok"]: + if not resp['ok']: - if resp["error"] == "ratelimited": - time.sleep(int(resp["headers"]["Retry-After"])) + if resp['error'] == 'ratelimited': + time.sleep(int(resp['headers']['Retry-After'])) resp = self.client.api_call( - "files.upload", - channels=channels, - file=file_content, - filetype=filetype, - initial_comment=initial_comment, - title=title, - ) + "files.upload", channels=channels, file=file_content, + filetype=filetype, initial_comment=initial_comment, + title=title) - raise SlackClientError(resp["error"]) + raise SlackClientError(resp['error']) return resp @@ -247,15 +212,16 @@ def _paginate_request(self, endpoint, collection, **kwargs): next_page = True cursor = None while next_page: - resp = self.client.api_call(endpoint, cursor=cursor, limit=LIMIT, **kwargs) + resp = self.client.api_call( + endpoint, cursor=cursor, limit=LIMIT, **kwargs) - if not resp["ok"]: + if not resp['ok']: - if resp["error"] == "ratelimited": - time.sleep(int(resp["headers"]["Retry-After"])) + if resp['error'] == 'ratelimited': + time.sleep(int(resp['headers']['Retry-After'])) continue - raise SlackClientError(resp["error"]) + raise SlackClientError(resp['error']) items.extend(resp[collection]) diff --git a/parsons/notifications/smtp.py b/parsons/notifications/smtp.py index ae55ee2a64..7444a6f283 100644 --- a/parsons/notifications/smtp.py +++ b/parsons/notifications/smtp.py @@ -21,23 +21,13 @@ class SMTP(SendMail): close_manually: bool When set to True, send_message will not close the connection """ - - def __init__( - self, - host=None, - port=None, - username=None, - password=None, - tls=None, - close_manually=False, - ): - self.host = check("SMTP_HOST", host) - self.port = check("SMTP_PORT", port, optional=True) or 587 - self.username = check("SMTP_USER", username) - self.password = check("SMTP_PASSWORD", password) - self.tls = not ( - check("SMTP_TLS", tls, optional=True) in ("false", "False", "0", False) - ) + def __init__(self, host=None, port=None, username=None, password=None, tls=None, + close_manually=False): + self.host = check('SMTP_HOST', host) + self.port = check('SMTP_PORT', port, optional=True) or 587 + self.username = check('SMTP_USER', username) + self.password = check('SMTP_PASSWORD', password) + self.tls = not (check('SMTP_TLS', tls, optional=True) in ('false', 'False', '0', False)) self.close_manually = close_manually self.conn = None @@ -63,19 +53,16 @@ def _send_message(self, message): self.log.info("Sending a message...") try: conn = self.get_connection() - result = conn.sendmail( - message["From"], - [x.strip() for x in message["To"].split(",")], - message.as_string(), - ) + result = conn.sendmail(message['From'], + [x.strip() for x in message['To'].split(',')], + message.as_string()) except Exception: - self.log.exception("An error occurred: while attempting to send a message.") + self.log.exception( + 'An error occurred: while attempting to send a message.') raise if result: - self.log.warning( - "Message failed to send to some recipients: " + str(result) - ) + self.log.warning("Message failed to send to some 
recipients: " + str(result)) if not self.close_manually: conn.quit() self.conn = None diff --git a/parsons/pdi/__init__.py b/parsons/pdi/__init__.py index 0dea8895a6..b62b40603d 100644 --- a/parsons/pdi/__init__.py +++ b/parsons/pdi/__init__.py @@ -1,3 +1,5 @@ from parsons.pdi.pdi import PDI -__all__ = ["PDI"] +__all__ = [ + 'PDI' +] diff --git a/parsons/pdi/acquisition_types.py b/parsons/pdi/acquisition_types.py index 83d2a6ffdc..cc70102716 100644 --- a/parsons/pdi/acquisition_types.py +++ b/parsons/pdi/acquisition_types.py @@ -7,8 +7,9 @@ def __init__(self): super().__init__() - def get_acquisition_types(self, limit: int = None): + def get_acquisition_types(self, limit=None): """Get a list of Acquisition Types. + `Args:` limit: int Specify limit to return. @@ -17,112 +18,5 @@ def get_acquisition_types(self, limit: int = None): parsons.Table A Parsons table of all the data. """ - return self._request(self.url_acqtypes, limit=limit) - - def create_acquisition_type( - self, - acquisition_type: str, - acquisition_description: str, - acquisition_method: str, - page_default: str = None, - ): - """ - Create a new Acquisition Type - `Args:` - acquisition_type (string): The acquisition type - acquisition_description (string): The acquisition description - acquisition_method (string): The acquisition method - Options are: - "Phone" - "Canvass" - "Mail" - "IVR" - "Text Message" - "Email" - "Event" - "Online" - "Social" - "Site" - "Other Method" , - pageDefault (string, optional): The page default. - "Lookup" (Lookup Page) - "WalkList" (Create Lists & Files - Walk List) - "PhoneList" (Create Lists & Files - Phone List) - "PhoneBank" (Online Phone Bank) - "Canvassing" (Mobile Canvassing Device) - "Import" (Imports) - } - """ - payload = { - "acquisitionType": acquisition_type, - "acquisitionDescription": acquisition_description, - "acquisitionMethod": acquisition_method, - "pageDefault": page_default, - } - return self._request(self.url_acqtypes, req_type="POST", post_data=payload) - def get_acquisition_type(self, id: str): - """ - Get a Acquisition Type by id. - `Args:` - id: str - The Acquisition Type id - `Returns:` - parsons.Table - A Parsons table of all the data. - """ - return self._request(f"{self.url_acqtypes}/{id}") - - def delete_acquisition_type(self, id: str): - """ - Delete a Acquisition Type by id. - `Args:` - id: str - The Acquisition Type id - """ - return self._request(f"{self.url_acqtypes}/{id}", req_type="DELETE") - - def update_acquisition_type( - self, - id: str, - acquisition_type: str, - acquisition_description: str, - acquisition_method: str, - page_default: str = None, - ): - """ - Update Acquisition Type - `Args:` - acquisition_type (string): The acquisition type - acquisition_description (string): The acquisition description - acquisition_method (string): The acquisition method - Options are: - "Phone" - "Canvass" - "Mail" - "IVR" - "Text Message" - "Email" - "Event" - "Online" - "Social" - "Site" - "Other Method" , - pageDefault (string, optional): The page default. 
- "Lookup" (Lookup Page) - "WalkList" (Create Lists & Files - Walk List) - "PhoneList" (Create Lists & Files - Phone List) - "PhoneBank" (Online Phone Bank) - "Canvassing" (Mobile Canvassing Device) - "Import" (Imports) - } - """ - payload = { - "acquisitionType": acquisition_type, - "acquisitionDescription": acquisition_description, - "acquisitionMethod": acquisition_method, - "pageDefault": page_default, - } - return self._request( - f"{self.url_acqtypes}/{id}", req_type="PUT", post_data=payload - ) + return self._request(self.url_acqtypes, limit=limit) diff --git a/parsons/pdi/activities.py b/parsons/pdi/activities.py deleted file mode 100644 index 9f4bf33150..0000000000 --- a/parsons/pdi/activities.py +++ /dev/null @@ -1,54 +0,0 @@ -class Activities: - """A class to access the Activities PDI API endpoint.""" - - def __init__(self): - activites_endpoint = "/activities" - self.url_activites = self.base_url + activites_endpoint - super().__init__() - - def get_activities(self, limit: int = None): - """Get a list of Activities. - `Args:` - limit: int - Specify limit to return. - - `Returns:` - parsons.Table - A Parsons table of all the data. - """ - return self._request(self.url_activites, limit=limit) - - def create_activity(self, activity_name: str, canvassing_shift: bool): - """ - Create a New Activity - `Args:` - activity_name str: The activity name - canvassing_shift bool: The canvassing shift - """ - payload = {"activityName": activity_name, "canvassingShift": canvassing_shift} - return self._request(self.url_activites, req_type="POST", post_data=payload) - - def get_activity(self, id: str): - """ - Get a Activity by id. - `Args:` - id: str - The Activity id - `Returns:` - parsons.Table - A Parsons table of all the data. - """ - return self._request(f"{self.url_activites}/{id}") - - def update_activity(self, id: str, activity_name: str, canvassing_shift: str): - """ - Update an Activity - `Args:` - id: Activity id - activity_name str: The activity name - canvassing_shift bool: The canvassing shift - """ - payload = {"activityName": activity_name, "canvassingShift": canvassing_shift} - return self._request( - f"{self.url_activites}/{id}", req_type="PUT", post_data=payload - ) diff --git a/parsons/pdi/contacts.py b/parsons/pdi/contacts.py deleted file mode 100644 index 73dc0c0522..0000000000 --- a/parsons/pdi/contacts.py +++ /dev/null @@ -1,254 +0,0 @@ -class Contacts: - """A class to access the contacts PDI API endpoint.""" - - def __init__(self): - self.url_contacts = self.base_url + "/contacts" - super().__init__() - - def get_contacts( - self, - email: str = None, - phone: str = None, - first_name: str = None, - last_name: str = None, - zip_code: str = None, - search_by_email: bool = False, - limit: int = None, - ): - """ - Get a list of Contacts. - `Args:` - email: str, email address - phone: str, phone number - first_name: str, first name - last_name: str, last name - zip code: str, zip code - search_by_email: bool, whether to search using email address - limit: int - The number of contacts to return. - `Returns:` - parsons.Table - A Parsons table of all the data. 
- """ - params = { - "email": email, - "phone": phone, - "firstName": first_name, - "lastName": last_name, - "zipCode": zip_code, - "searchByEmail": search_by_email, - } - return self._request(self.url_contacts, args=params, limit=limit) - - def create_contact( - self, - name_prefix="", - first_name="", - last_name="", - middle_name="", - name_suffix="", - nickname="", - occupation="", - employer="", - volunteer_status="", - donor_status="", - member_status="", - date_of_birth=None, - gender=None, - pdi_id=None, - ): - """ - Create new contact - `Args:` - pdiId (string, optional): The pdi identifier. pdiId field is ignored when updating. , - namePrefix (string): The name prefix. - firstName (string): The first name. - middleName (string): The middle name. - lastName (string): The last name. - nameSuffix (string): The name suffix. - nickname (string): The nickname. - occupation (string): The occupation. - employer (string): The employer. - volunteerStatus (string): The volunteer status. - Options are: "Prospect", "Active", "Inactive", "None", "" , - donorStatus (string): The donor status. - Options are: "Prospect", "Active", "Inactive", "None", "" , - memberStatus (string): The member status. - Options are: "Prospect", "Active", "Inactive", "None", "" , - dateOfBirth (string, optional): The date of birth. - Format allowed: yyyy-MM-dd , - gender (string, optional): The gender. - Options are: "F", "M", "U" - - `Returns:` - parsons.Table - A Parsons table of all the data. - """ - payload = { - "namePrefix": name_prefix, - "firstName": first_name, - "lastName": last_name, - "nameSuffix": name_suffix, - "nickname": nickname, - "middleName": middle_name, - "occupation": occupation, - "employer": employer, - "volunteerStatus": volunteer_status, - "donorStatus": donor_status, - "memberStatus": member_status, - "dateOfBirth": date_of_birth, - "gender": gender, - "pdiId": pdi_id, - } - return self._request(self.url_contacts, req_type="POST", post_data=payload) - - def get_contact(self, id: str): - """ - Get a Contact by id. - - `Args:` - id: str - The Contact id - `Returns:` - parsons.Table - A Parsons table of all the data. - """ - # todo not working quite right - return self._request(f"{self.url_contacts}/{id}") - - def update_contact( - self, - id, - first_name, - last_name, - name_prefix="", - middle_name="", - name_suffix="", - nickname="", - occupation="", - employer="", - volunteer_status="", - donor_status="", - member_status="", - date_of_birth=None, - gender="U", - ): - """ - Update Contact - `Args:` - namePrefix (string): The name prefix. - firstName (string): The first name. - middleName (string): The middle name. - lastName (string): The last name. - nameSuffix (string): The name suffix. - nickname (string): The nickname. - occupation (string): The occupation. - employer (string): The employer. - volunteerStatus (string): The volunteer status. - Options are: "Prospect", "Active", "Inactive", "None", "" , - donorStatus (string): The donor status. - Options are: "Prospect", "Active", "Inactive", "None", "" , - memberStatus (string): The member status. - Options are: "Prospect", "Active", "Inactive", "None", "" , - dateOfBirth (string, optional): The date of birth. - Format allowed: yyyy-MM-dd , - gender (string, optional): The gender. - Options are: "F", "M", "U" - - `Returns:` - parsons.Table - A Parsons table of all the data. 
- """ - payload = { - "namePrefix": name_prefix, - "firstName": first_name, - "middleName": middle_name, - "lastName": last_name, - "nameSuffix": name_suffix, - "nickname": nickname, - "occupation": occupation, - "employer": employer, - "volunteerStatus": volunteer_status, - "donorStatus": donor_status, - "memberStatus": member_status, - "dateOfBirth": date_of_birth, - "gender": gender, - } - res = self._request( - f"{self.url_contacts}/{id}", req_type="PUT", post_data=payload - ) - if res["code"] == 201: - return True - - def add_phone( - self, - contact_id: int, - phone_number: str, - phone_type="Mobile", - primary=True, - extension=None, - ): - """Add a phone number to a contact - `Args:` - contact_id: int - Unique ID of the contact you'd like to apply the phone_number to - phone_number: str - phone_type: str - Options are `Home`, `Work`, `Direct`, `Mobile`, `Fax`, and `Other. Defaults to - `Mobile` - primary: bool - True indicates that this phone number is the contact's primary phone number - extension: str - `Returns:` - dict - Response from PDI - """ - - payload = { - "phoneNumber": phone_number, - "phoneType": phone_type, - "isPrimary": primary, - } - - if extension: - payload["extension"] = extension - - response = self._request( - self.url_contacts + f"/{str(contact_id)}/phones", - req_type="POST", - post_data=payload, - ) - - return response - - def add_email(self, contact_id: int, email: str, primary=True): - """Add an email address to a contact - `Args:` - contact_id: int - Unique ID of the contact you'd like to apply the email to - email: str - primary: bool - True indicates that this email address is the contact's primary email - `Returns:` - dict - Response from PDI - """ - - payload = {"emailAddress": email, "isPrimary": primary} - - response = self._request( - self.url_contacts + f"/{str(contact_id)}/emails", - req_type="POST", - post_data=payload, - ) - - return response - - def delete_contact(self, id: str): - """ - Delete a Question by id. - `Args:` - id: str - The Question id - """ - return self._request(f"{self.url_contacts}/{id}", req_type="DELETE") diff --git a/parsons/pdi/events.py b/parsons/pdi/events.py deleted file mode 100644 index 1f5164b8f2..0000000000 --- a/parsons/pdi/events.py +++ /dev/null @@ -1,587 +0,0 @@ -import logging - -logger = logging.getLogger(__name__) - - -class Events: - """A class for interacting with PDI events via PDIs API""" - - def __init__(self): - self.events_url = self.base_url + "/events" - self.calendars_url = self.base_url + "/calendars" - self.eventactivities_url = self.base_url + "/eventActivities" - self.activites_url = self.base_url + "/activities" - self.activityassignment_url = self.base_url + "/eventActivityAssignments" - - super().__init__() - - def get_events(self, first_event_date: str, last_event_date: str, limit=None): - """Get a table of PDI events in a given time frame - - `Args:` - first_event_date: str - First date in the timeframe from which you want events formatted at 'yyy-MM-dd' - last_event_date: str - Last date in the timeframe from which you want events formatted at 'yyy-MM-dd' - limit: int - The max number of events to return - - `Returns:` - parsons.Table - A Parsons table containing all requested events data. 
- """ - - params = { - "startDate": first_event_date, - "endDate": last_event_date, - } - - return self._request(self.events_url, args=params, limit=limit) - - def get_event_invitations(self, event_id: str, expand=True, limit=None): - """Get a table of PDI event invitations for a specified event - - `Args:` - event_id: str - ID of event for which you want invitations - expand: bool - If True returns columns for contact (and all contact info) and event) - - `Returns:` - parsons.Table - A Parsons table containing all requested event invitation data. - """ - - params = {"expand": expand} - - return self._request( - f"{self.events_url}/{event_id}/invitations", args=params, limit=limit - ) - - def create_event( - self, - calendar_id: str, - location_id: str, - event_name: str, - start_datetime: str, - end_datetime: str, - description=None, - all_day=False, - recurrencetype=None, - recurrence_end_datetime=None, - host_phone=None, - host_email=None, - website=None, - ): - """Create event in a specified calendar - - `Args:` - calendar_id: str - The calendar in which you'd like to create an event - location_id: str - The unique ID of the PDI location this event took place/is to take place at - event_name: str - The name of your event - description: str - A short description for your event - start_datetime: str - The start datetime of the event in UTC timezone formatted as - yyyy-MM-ddThh:mm:ss.fffZ - end_datetime: str - The end date formatted like start_datetime - is_all_day: bool - set to True if event is an all day event. Defaults to False - recurrencetype: str - Either 'daily', 'weekly', or 'monthly'. Defaults to None - recurrence_end_datetime: str - The end time of the last recurrence of the event formatted as - yyyy-MM-ddThh:mm:ss.fffZ - host_phone: str - An optional contact phone number for the host. Defaults to None - host_email: str - An optional contact email for the host. Defaults to None - website: str - An optional website for the event. Defualts to None - - `Returns:` - dict - Response from PDI in dictionary object - - """ - - payload = { - "locationId": location_id, - "recurrenceType": recurrencetype, - "name": event_name, - "description": description, - "startDateTimeUtc": start_datetime, - "endDateTimeUtc": end_datetime, - "isAllDay": str(all_day).lower(), - "recurrenceEndDateTimeUtc": recurrence_end_datetime, - "phone": host_phone, - "contactEmail": host_email, - "website": website, - } - - response = self._request( - self.calendars_url + f"/{calendar_id}" + "/events", - req_type="POST", - post_data=payload, - ) - event_id = response["id"] - logger.info(f"Created event {event_name} (id: {event_id})") - - return response - - def create_event_with_activity( - self, - calendar_id: str, - location_id: str, - activity_id: str, - event_name: str, - activity_name: str, - start_datetime: str, - end_datetime: str, - description=None, - all_day=False, - recurrencetype=None, - recurrence_end_datetime=None, - host_phone=None, - host_email=None, - website=None, - signup_goal=None, - ): - """Create event in a specified calendar with an associated activity. The activty will - be assigned the same start, end time, and recurrance settings as the event. 
- - `Args:` - calendar_id: str - The unique ID of the calendar in which you'd like to create an event - location_id: str - The unique ID of the PDI location where this event took place/is to take - place - activity_id: - The unique ID of the activity type you'd like to add to the event - event_name: str - The name of your event - activity_name: str - The name of your activity. e.g. 'Pictionary!' - description: str - A short description for your event - start_datetime: str - The start datetime of the event in UTC timezone formatted as - yyyy-MM-ddThh:mm:ss.fffZ - end_datetime: str - The end date formatted like start_datetime - is_all_day: bool - set to True if event is an all day event. Defaults to False - recurrencetype: str - Either 'daily', 'weekly', or 'monthly'. Defaults to None - recurrence_end_datetime: str - The end time of the last recurrence of the event formatted as - yyyy-MM-ddThh:mm:ss.fffZ - host_phone: str - An optional contact phone number for the host. Defaults to None - host_email: str - An optional contact email for the host. Defaults to None - website: str - An optional website for the event. Defaults to None - signup_goal: int - The goal of how many people you want to complete the activity - `Returns:` - dict - Response from PDI in dictionary object - """ - event_data = self.create_event( - calendar_id, - location_id, - event_name, - start_datetime, - end_datetime, - description, - all_day, - recurrencetype, - recurrence_end_datetime, - host_phone, - host_email, - website, - ) - event_id = event_data["id"] - logger.info(f"Created event {event_name} (id: {event_id})") - - event_activity_payload = { - "CalendarId": calendar_id, - "EventId": event_id, - "ActivityId": activity_id, - "LocationId": location_id, - "RecurrenceType": recurrencetype, - "Name": activity_name, - "Description": None, - "StartDateTimeUtc": start_datetime, - "EndDateTimeUtc": end_datetime, - "CountGoal": signup_goal, - "RecurrenceEndDateTimeUtc": recurrence_end_datetime, - } - - response = self._request( - self.eventactivities_url, req_type="POST", post_data=event_activity_payload - ) - logger.info( - f"Created activity {activity_name} for event {event_name} (id: {event_id})" - ) - - return response - - def create_event_activity( - self, - calendar_id: str, - event_id: str, - activity_id: str, - location_id: str, - activity_name: str, - start_datetime: str, - end_datetime: str, - description=None, - recurrencetype=None, - recurrence_end_datetime=None, - signup_goal=None, - ): - """Create an activity for an existing event in a specified calendar - - `Args:` - calendar_id: str - The unique ID of the calendar in which you'd like to create an event - event_id: str - The unique ID of the event this activity is to be associated with - activity_id: - The unique ID of the activity type you'd like to add to the event - location_id: str - The unique ID of the PDI location where this event took place/is to take - place - activity_name: str - The name of your activity. e.g. 'Pictionary!' - description: str - A short description for your event activity - start_datetime: str - The start datetime of the event in UTC timezone formatted as - yyyy-MM-ddThh:mm:ss.fffZ - end_datetime: str - The end date formatted like start_datetime - recurrencetype: str - Either 'daily', 'weekly', or 'monthly'.
Defaults to None - recurrence_end_datetime: str - The end time of the last recurrence of the event formatted as - yyyy-MM-ddThh:mm:ss.fffZ - signup_goal: int - The goal of how many people you want to complete the activity - - - `Returns:` - dict - Response from PDI in dictionary object - """ - - event_activity_payload = { - "CalendarId": calendar_id, - "EventId": event_id, - "ActivityId": activity_id, - "LocationId": location_id, - "RecurrenceType": recurrencetype, - "Name": activity_name, - "Description": description, - "StartDateTimeUtc": start_datetime, - "EndDateTimeUtc": end_datetime, - "CountGoal": signup_goal, - "RecurrenceEndDateTimeUtc": recurrence_end_datetime, - } - - response = self._request( - self.eventactivities_url, req_type="POST", post_data=event_activity_payload - ) - logger.info(f"Created activity {activity_name} for event {event_id})") - - return response - - def create_invitation( - self, - event_id: str, - contact_id: str, - status: str, - attended: bool, - confirmed=False, - specific_occurrence_start=None, - ): - """Create a PDI event invitation indicating a contact has been registered for an event - `Args:` - event_id: str - The ID of the event to write the RSVP to - contact_id: str - The ID of the contact to which the invitation belongs - status: str - Options are: "Yes", "No", "Maybe", "Scheduled", "Invited", "Cancelled", - "No-Show", "Completed", and "" - attended: boolean - Indicates whether contact attended event - confirmed: boolean - Indicates whether invitation confirmed they will attend the event. Defaults to - False - specific_occurrence_start: str - If invitation is for a specific occurrence of a recurring event, then the start - datetime of the event in UTC formatted as yyyy-MM-ddTHH:mm:ss.fffZ - `Returns:` - dict - Response from PDI in dictionary object - """ - - event_invitation_payload = { - "contactId": contact_id, - "rsvpStatus": status, - "isConfirmed": confirmed, - "attended": attended, - } - - if specific_occurrence_start: - event_invitation_payload[ - "specificOcurrenceStartUtc" - ] = specific_occurrence_start - - response = self._request( - self.events_url + f"/{event_id}/invitations", - req_type="POST", - post_data=event_invitation_payload, - ) - return response - - def update_invitation( - self, - invitation_id: str, - event_id: str, - contact_id: str, - status=None, - attended=None, - confirmed=None, - specific_occurrence_start=None, - ): - """Modify a PDI event invitation - `Args:` - invitation_id: str - The ID of the event invitation - event_id: str - The ID of the event that corresponds to the invitation - contact_id: str - The ID of the contact to which the invitation belongs - status: str - Options are: "Yes", "No", "Maybe", "Scheduled", "Invited", "Cancelled", - "No-Show", "Completed", and "" - attended: boolean - Indicates whether contact attended event - confirmed: boolean - Indicates whether invitation confirmed they will attend the event - specific_occurrence_start: str - If invitation is for a specific occurrence of a recurring event, then the start - datetime of the event in UTC formatted as yyyy-MM-ddTHH:mm:ss.fffZ - `Returns:` - dict - Response from PDI in dictionary object - """ - - event_invitation_payload = {"contactId": contact_id} - - if status: - event_invitation_payload["rsvpStatus"] = status - if confirmed is not None: - event_invitation_payload["isConfirmed"] = confirmed - if attended is not None: - event_invitation_payload["attended"] = attended - if specific_occurrence_start: - event_invitation_payload[ - 
"specificOcurrenceStartUtc" - ] = specific_occurrence_start - - response = self._request( - self.events_url + f"/{event_id}/invitations/{invitation_id}", - req_type="PUT", - post_data=event_invitation_payload, - ) - return response - - def create_activity_assignment( - self, - eventactivityid: str, - contact_id: str, - status: str, - completed: bool, - confirmed=False, - specific_occurrence_start=None, - ): - """Create an activity assignement - `Args:` - eventactivityid: str - The ID of the specific event activity you'd like to assign a contact - contact_id: str - The ID of the contact to which the assignment belongs - status: str - Options are: "Yes", "No", "Maybe", "Scheduled", "Invited", "Cancelled", - "No-Show", "Completed", and "" - completed: boolean - Indicates whether contact attended event - confirmed: boolean - Indicates whether invitation confirmed they will attend the event - specific_occurrence_start: str - If invitation is for a specific occurrence of a recurring event, then the start - datetime of the event in UTC formatted as yyyy-MM-ddTHH:mm:ss.fffZ - `Returns:` - dict - Response from PDI in dictionary object - """ - - assignment_payload = { - "rsvpStatus": status, - "isConfirmed": confirmed, - "isShiftWorked": completed, - "contactId": contact_id, - "eventActivityId": eventactivityid, - } - - if specific_occurrence_start: - assignment_payload["specificOcurrenceStartUtc"] = specific_occurrence_start - - response = self._request( - self.activityassignment_url, req_type="POST", post_data=assignment_payload - ) - - return response - - def update_activity_assignment( - self, - activityassignementid: str, - eventactivityid: str, - contact_id: str, - status=None, - completed=None, - confirmed=None, - specific_occurrence_start=None, - ): - """Create an activity assignement - `Args:` - activityassignementid: str - Id of the specific event activity assignement you want to modify - eventactivityid: str - The ID of the specific event activity you'd like to assign a contact - contact_id: str - The ID of the contact to which the assignment belongs - status: str - Options are: "Yes", "No", "Maybe", "Scheduled", "Invited", "Cancelled", - "No-Show", "Completed", and "" - completed: boolean - Indicates whether contact attended event - confirmed: boolean - Indicates whether invitation confirmed they will attend the event - specific_occurrence_start: str - If invitation is for a specific occurrence of a recurring event, then the start - datetime of the event in UTC formatted as yyyy-MM-ddTHH:mm:ss.fffZ - `Returns:` - dict - Response from PDI in dictionary object - """ - - assignment_payload = { - "contactId": contact_id, - "eventActivityId": eventactivityid, - } - - if status: - assignment_payload["rsvpStatus"] = status - if confirmed is not None: - assignment_payload["isConfirmed"] = confirmed - if completed is not None: - assignment_payload["isShiftWorked"] = completed - if specific_occurrence_start: - assignment_payload["specificOcurrenceStartUtc"] = specific_occurrence_start - - response = self._request( - self.activityassignment_url + f"/{activityassignementid}", - req_type="PUT", - post_data=assignment_payload, - ) - - return response - - def get_event_activity_assignments(self, start_date, end_date, expand, limit=None): - """ - Get a list of event activity assignments. 
- Relevant API docs: - https://api.bluevote.com/docs/index#/EventActivityAssignments - - `Args`: - start_date: str - Earliest records to be returned in the API response - Per the API docs, use "YYYY-MM-DD" format - - end_date: str - Latest records to be returned in the API response. - Per the API docs, use "YYYY-MM-DD" format - - expand: bool - Parameter to determine if we return the list of shift assignments - expanded or not - - limit: int - Specify limit to return (max=2000) - - `Returns`: - Parsons Table with event activity assignment responses - """ - - if limit and limit > 2000: - raise ValueError("Maximum allowed limit is 2000") - - params = {"startDate": start_date, "endDate": end_date, "expand": expand} - return self._request(self.activityassignment_url, args=params, limit=limit) - - def get_event_activities(self, start_date, end_date, limit=None): - """ - Get a list of event activities. - Relevant API docs: - https://api.bluevote.com/docs/index#!/EventActivities/EventActivities_GetAll - - `Args`: - start_date: str - Earliest records to be returned in the API response - Per the API docs, use "YYYY-MM-DD" format - - end_date: str - Latest records to be returned in the API response. - Per the API docs, use "YYYY-MM-DD" format - - limit: int - Specify limit to return (max=2000) - - `Returns`: - Parsons Table with event activity responses - """ - - if limit and limit > 2000: - raise ValueError("Maximum allowed limit is 2000") - - params = {"startDate": start_date, "endDate": end_date} - return self._request(self.eventactivities_url, args=params, limit=limit) - - def get_calendars(self, limit=None): - """ - Gets a list of calendars. - Relevant API docs: - https://api.bluevote.com/docs/index#!/Calendars/Calendars_GetAll - - `Args`: - limit: int - Specify limit to return (max=2000) - - `Returns`: - Parsons Table object with id, name, description, and timeZone records - """ - - if limit and limit > 2000: - raise ValueError("Maximum allowed limit is 2000") - - return self._request(self.calendars_url, limit=limit) diff --git a/parsons/pdi/flag_ids.py b/parsons/pdi/flag_ids.py index 4e2bd84a49..07fb353476 100644 --- a/parsons/pdi/flag_ids.py +++ b/parsons/pdi/flag_ids.py @@ -35,7 +35,8 @@ def get_flag_id(self, id): """ return self._request(f"{self.url_flag_ids}/{id}") - def create_flag_id(self, flag_id, is_default, flag_description=None, compile=None): + def create_flag_id(self, flag_id, is_default, flag_description=None, + compile=None): """Save a new flag id. `Args:` @@ -62,7 +63,8 @@ def create_flag_id(self, flag_id, is_default, flag_description=None, compile=Non "compile": compile, "isDefault": is_default, } - data = self._request(self.url_flag_ids, req_type="POST", post_data=payload) + data = self._request( + self.url_flag_ids, req_type="POST", post_data=payload) return data["id"] @@ -84,9 +86,8 @@ def delete_flag_id(self, id): return True - def update_flag_id( - self, id, flag_id, is_default, flag_description=None, compile=None - ) + def update_flag_id(self, id, flag_id, is_default, flag_description=None, + compile=None): """Update a flag id.
`Args:` @@ -115,7 +116,6 @@ def update_flag_id( "isDefault": is_default, } data = self._request( - f"{self.url_flag_ids}/{id}", req_type="PUT", post_data=payload - ) + f"{self.url_flag_ids}/{id}", req_type="PUT", post_data=payload) return data["id"] diff --git a/parsons/pdi/flags.py b/parsons/pdi/flags.py index 8cab60bd1a..090a502f6c 100644 --- a/parsons/pdi/flags.py +++ b/parsons/pdi/flags.py @@ -1,15 +1,15 @@ from dateutil.parser import parse -from datetime import datetime class Flags: """A class to access the Flags PDI API endpoint.""" def __init__(self): - super().__init__() flags_endpoint = "/flags" self.url_flags = self.base_url + flags_endpoint + super().__init__() + def get_flags(self, start_date, end_date, limit=None): """Get a list of flags. @@ -37,39 +37,3 @@ def get_flags(self, start_date, end_date, limit=None): } return self._request(self.url_flags, args=params, limit=limit) - - def create_flags(self, flag_list: list): - """ - Save a list of flags, each flag must look like the dictionary below - [ - { - "pdiId": "string", - "flagEntryDate": An end date formatted like yyyy-MM-dd., - "acquisitionTypeId": "string", - "flagId": "string", - "questionId": "string", - "contactId": "string" - } - ] - """ - if "pdiId" not in list(flag_list[0].keys()): - raise ValueError("missing required key") - return {} - for flag in flag_list: - try: - flag["flagEntryDate"] = str( - datetime.strptime(flag["flagEntryDate"], "%Y-%m-%d").isoformat() - ) - except ValueError: - raise ValueError("Invalid date format.") - print(flag_list) - return self._request(self.url_flags, post_data=flag_list, req_type="POST") - - def delete_flag(self, id: str): - """ - Delete a Flag by id. - `Args:` - id: str - The Flag id - """ - return self._request(f"self.url_flags/{id}", req_type="DELETE") diff --git a/parsons/pdi/locations.py b/parsons/pdi/locations.py deleted file mode 100644 index 8c7ceae47d..0000000000 --- a/parsons/pdi/locations.py +++ /dev/null @@ -1,47 +0,0 @@ -class Locations: - """A class for getting, creating, and editing PDI locations""" - - def __init__(self): - self.locations_url = self.base_url + "/locations" - - super().__init__() - - def get_locations(self, limit=None): - """Get a list of PDI Locations - - `Args:` - limit: int - The max number of locations to return - - `Returns:` - parsons.Table - A Parsons table containing all requested location data. - """ - - return self._request(self.locations_url, limit=limit) - - def create_location(self, address: str, name: str): - """Create a new PDI address - `Args:` - address: str - A full address including street number, city, state, and zip. - name: str - The name of the location. E.g. 
"The Overlook Hotel" - `Returns:` - dict - Response from PDI in dictionary object - """ - - payload = {"locationName": name, "locationAddress": address} - return self._request(self.locations_url, req_type="POST", post_data=payload) - - def get_location(self, id: str): - return self._request(f"{self.locations_url}/{id}") - - def update_location(self, id: str, location_name: str, address: str): - payload = {"locationName": location_name, "locationAddress": address} - res = self._request( - f"{self.locations_url}/{id}", req_type="PUT", post_data=payload - ) - if res["code"] == 201: - return True diff --git a/parsons/pdi/pdi.py b/parsons/pdi/pdi.py index 5ca952a181..729dda6bc9 100644 --- a/parsons/pdi/pdi.py +++ b/parsons/pdi/pdi.py @@ -3,10 +3,6 @@ from parsons.pdi.universes import Universes from parsons.pdi.questions import Questions from parsons.pdi.acquisition_types import AcquisitionTypes -from parsons.pdi.events import Events -from parsons.pdi.locations import Locations -from parsons.pdi.contacts import Contacts -from parsons.pdi.activities import Activities from parsons import Table from parsons.utilities import check_env @@ -21,18 +17,10 @@ logger = logging.getLogger(__name__) -class PDI( - FlagIDs, - Universes, - Questions, - AcquisitionTypes, - Flags, - Events, - Locations, - Contacts, - Activities, -): - def __init__(self, username=None, password=None, api_token=None, qa_url=False): +class PDI(FlagIDs, Universes, Questions, AcquisitionTypes, Flags): + + def __init__(self, username=None, password=None, api_token=None, + qa_url=False): """ Instantiate the PDI class @@ -48,7 +36,7 @@ def __init__(self, username=None, password=None, api_token=None, qa_url=False): can be set as `PDI_API_TOKEN` environment variable. qa_url: bool Defaults to False. If True, requests will be made to a sandbox - account. This requires separate qa credentials and api + account. NOTE: This requires separate qa credentials and api token. 
""" if qa_url: @@ -56,9 +44,9 @@ def __init__(self, username=None, password=None, api_token=None, qa_url=False): else: self.base_url = "https://api.bluevote.com" - self.username = check_env.check("PDI_USERNAME", username) - self.password = check_env.check("PDI_PASSWORD", password) - self.api_token = check_env.check("PDI_API_TOKEN", api_token) + self.username = check_env.check('PDI_USERNAME', username) + self.password = check_env.check('PDI_PASSWORD', password) + self.api_token = check_env.check('PDI_API_TOKEN', api_token) super().__init__() @@ -73,9 +61,14 @@ def _get_session_token(self): "Password": self.password, "ApiToken": self.api_token, } - res = requests.post(f"{self.base_url}/sessions", json=login, headers=headers) + res = requests.post( + f"{self.base_url}/sessions", + json=login, + headers=headers) + logger.debug(f"{res.status_code} - {res.url}") res.raise_for_status() + # status_code == 200 data = res.json() self.session_token = data["AccessToken"] @@ -90,7 +83,8 @@ def _clean_dict(self, dct): return dct - def _request(self, url, req_type="GET", post_data=None, args=None, limit=None): + def _request(self, url, req_type='GET', post_data=None, args=None, + limit=None): # Make sure to have a current token before we make another request now = datetime.now(timezone.utc) if now > self.session_exp: @@ -118,7 +112,10 @@ def _request(self, url, req_type="GET", post_data=None, args=None, limit=None): args = self._clean_dict(args) if args else args post_data = self._clean_dict(post_data) if post_data else post_data - res = request_fn[req_type](url, headers=headers, json=post_data, params=args) + + res = request_fn[req_type]( + url, headers=headers, json=post_data, params=args) + logger.debug(f"{res.url} - {res.status_code}") logger.debug(res.request.body) @@ -137,7 +134,8 @@ def _request(self, url, req_type="GET", post_data=None, args=None, limit=None): if "data" not in res_json: return res_json - total_count = 0 if "totalCount" not in res_json else res_json["totalCount"] + total_count = (0 if "totalCount" not in res_json + else res_json["totalCount"]) data = res_json["data"] if not limit: @@ -149,8 +147,7 @@ def _request(self, url, req_type="GET", post_data=None, args=None, limit=None): args["cursor"] = cursor args["limit"] = LIMIT_MAX res = request_fn[req_type]( - url, headers=headers, json=post_data, params=args - ) + url, headers=headers, json=post_data, params=args) data.extend(res.json()["data"]) @@ -167,8 +164,7 @@ def _request(self, url, req_type="GET", post_data=None, args=None, limit=None): args["cursor"] = cursor args["limit"] = min(LIMIT_MAX, total_need - len(data)) res = request_fn[req_type]( - url, headers=headers, json=post_data, params=args - ) + url, headers=headers, json=post_data, params=args) data.extend(res.json()["data"]) diff --git a/parsons/pdi/questions.py b/parsons/pdi/questions.py index 552e1fcb24..024879eafa 100644 --- a/parsons/pdi/questions.py +++ b/parsons/pdi/questions.py @@ -18,54 +18,5 @@ def get_questions(self, limit=None): parsons.Table A Parsons table of all the data. """ - return self._request(self.url_questions, limit=limit) - - def get_question(self, id: str): - """ - Get a Question by id. - `Args:` - id: str - The Question id - `Returns:` - parsons.Table - A Parsons table of all the data. 
- """ - return self._request(f"{self.url_questions}/{id}") - - def create_question( - self, - question: str, - type: str, - category: str, - answer_options: list, - question_label: str = None, - question_description: str = None, - candidate_issue_id: str = None, - default: bool = True, - *args, - ): - """ - answer_options:[ - { - "id": "string", - "flagId": "string", - "displayDescription": "string", - "displayCode": "string" - } - ] - """ - payload = { - "question": question, - "questionLabel": question_label, - "questionDescription": question_description, - "type": type, - "category": category, - "candidateIssueId": candidate_issue_id, - "default": default, - "answerOptions": answer_options, - } - return self._request(self.locations_url, req_type="POST", post_data=payload) - - def delete_question(self, id: str): - return self._request(f"{self.url_questions}/{id}", req_type="DELETE") + return self._request(self.url_questions, limit=limit) diff --git a/parsons/pdi/universes.py b/parsons/pdi/universes.py index 98fdf2105b..e52b47a271 100644 --- a/parsons/pdi/universes.py +++ b/parsons/pdi/universes.py @@ -20,16 +20,3 @@ def get_universes(self, limit=None): """ return self._request(self.url_universes, limit=limit) - - def get_universe(self, id: str): - """ - Get a Universe by id. - - `Args:` - id: str - The Universe id - `Returns:` - parsons.Table - A Parsons table of all the data. - """ - return self._request(f"{self.url_universes}/{id}") diff --git a/parsons/phone2action/__init__.py b/parsons/phone2action/__init__.py index cbac2498f7..62fd7984ff 100644 --- a/parsons/phone2action/__init__.py +++ b/parsons/phone2action/__init__.py @@ -1,3 +1,5 @@ from parsons.phone2action.p2a import Phone2Action -__all__ = ["Phone2Action"] +__all__ = [ + 'Phone2Action' +] diff --git a/parsons/phone2action/p2a.py b/parsons/phone2action/p2a.py index 1896066d70..7c99d60218 100644 --- a/parsons/phone2action/p2a.py +++ b/parsons/phone2action/p2a.py @@ -1,8 +1,14 @@ -from parsons.capitol_canary import CapitolCanary +from requests.auth import HTTPBasicAuth +from parsons.etl import Table +from parsons.utilities import check_env +from parsons.utilities.api_connector import APIConnector +from parsons.utilities.datetime import date_to_timestamp import logging logger = logging.getLogger(__name__) +PHONE2ACTION_URI = 'https://api.phone2action.com/2.0/' + class Phone2Action(object): """ @@ -20,22 +26,34 @@ class Phone2Action(object): """ def __init__(self, app_id=None, app_key=None): - self.capitol_canary = CapitolCanary(app_id, app_key) - logger.warning( - "The Phone2Action class is being deprecated and replaced by CapitalCanary" - ) - - def __getattr__(self, name): - try: - return getattr(self.capitol_canary, name) - except AttributeError: - raise AttributeError( - f"{type(self).__name__} object has no attribute {name}" - ) - - def get_advocates( - self, state=None, campaign_id=None, updated_since=None, page=None - ): + + self.app_id = check_env.check('PHONE2ACTION_APP_ID', app_id) + self.app_key = check_env.check('PHONE2ACTION_APP_KEY', app_key) + self.auth = HTTPBasicAuth(self.app_id, self.app_key) + self.client = APIConnector(PHONE2ACTION_URI, auth=self.auth) + + def _paginate_request(self, url, args=None, page=None): + # Internal pagination method + + if page is not None: + args['page'] = page + + r = self.client.get_request(url, params=args) + + json = r['data'] + + if page is not None: + return json + + # If count of items is less than the total allowed per page, paginate + while r['pagination']['count'] == 
r['pagination']['per_page']: + + r = self.client.get_request(r['pagination']['next_url'], args) + json.extend(r['data']) + + return json + + def get_advocates(self, state=None, campaign_id=None, updated_since=None, page=None): """ Return advocates (person records). @@ -64,18 +82,52 @@ def get_advocates( * fields * advocates """ - return self.capitol_canary.get_advocates( - state, campaign_id, updated_since, page - ) - - def get_campaigns( - self, - state=None, - zip=None, - include_generic=False, - include_private=False, - include_content=True, - ): + + # Convert the passed in updated_since into a Unix timestamp (which is what the API wants) + updated_since = date_to_timestamp(updated_since) + + args = {'state': state, + 'campaignid': campaign_id, + 'updatedSince': updated_since} + + logger.info('Retrieving advocates...') + json = self._paginate_request('advocates', args=args, page=page) + + return self._advocates_tables(Table(json)) + + def _advocates_tables(self, tbl): + # Convert the advocates nested table into multiple tables + + tbls = { + 'advocates': tbl, + 'emails': Table(), + 'phones': Table(), + 'memberships': Table(), + 'tags': Table(), + 'ids': Table(), + 'fields': Table(), + } + + if not tbl: + return tbls + + logger.info(f'Retrieved {tbl.num_rows} advocates...') + + # Unpack all of the single objects + # The Phone2Action API docs says that created_at and updated_at are dictionaries, but + # the data returned from the server is a ISO8601 timestamp. - EHS, 05/21/2020 + for c in ['address', 'districts']: + tbl.unpack_dict(c) + + # Unpack all of the arrays + child_tables = [child for child in tbls.keys() if child != 'advocates'] + for c in child_tables: + tbls[c] = tbl.long_table(['id'], c, key_rename={'id': 'advocate_id'}) + + return tbls + + def get_campaigns(self, state=None, zip=None, include_generic=False, include_private=False, + include_content=True): """ Returns a list of campaigns @@ -96,28 +148,35 @@ def get_campaigns( See :ref:`parsons-table` for output options. """ - return self.capitol_canary.get_campaigns( - state, zip, include_generic, include_private, include_content - ) - - def create_advocate( - self, - campaigns, - first_name=None, - last_name=None, - email=None, - phone=None, - address1=None, - address2=None, - city=None, - state=None, - zip5=None, - sms_optin=None, - email_optin=None, - sms_optout=None, - email_optout=None, - **kwargs, - ): + args = {'state': state, + 'zip': zip, + 'includeGeneric': str(include_generic), + 'includePrivate': str(include_private) + } + + tbl = Table(self.client.get_request('campaigns', params=args)) + tbl.unpack_dict('updated_at') + if include_content: + tbl.unpack_dict('content') + + return tbl + + def create_advocate(self, + campaigns, + first_name=None, + last_name=None, + email=None, + phone=None, + address1=None, + address2=None, + city=None, + state=None, + zip5=None, + sms_optin=None, + email_optin=None, + sms_optout=None, + email_optout=None, + **kwargs): """ Create an advocate. @@ -171,36 +230,72 @@ def create_advocate( `Returns:` The int ID of the created advocate. 
""" - return self.capitol_canary.create_advocate( - campaigns, - first_name, - last_name, - email, - phone, - address1, - address2, - city, - state, - zip5, - sms_optin, - email_optin, - sms_optout, - email_optout, - **kwargs, - ) - - def update_advocate( - self, - advocate_id, - campaigns=None, - email=None, - phone=None, - sms_optin=None, - email_optin=None, - sms_optout=None, - email_optout=None, - **kwargs, - ): + + # Validate the passed in arguments + + if not campaigns: + raise ValueError( + 'When creating an advocate, you must specify one or more campaigns.') + + if not email and not phone: + raise ValueError( + 'When creating an advocate, you must provide an email address or a phone number.') + + if (sms_optin or sms_optout) and not phone: + raise ValueError( + 'When opting an advocate in or out of SMS messages, you must specify a valid ' + 'phone and one or more campaigns') + + if (email_optin or email_optout) and not email: + raise ValueError( + 'When opting an advocate in or out of email messages, you must specify a valid ' + 'email address and one or more campaigns') + + # Align our arguments with the expected parameters for the API + payload = { + 'email': email, + 'phone': phone, + 'firstname': first_name, + 'lastname': last_name, + 'address1': address1, + 'address2': address2, + 'city': city, + 'state': state, + 'zip5': zip5, + 'smsOptin': 1 if sms_optin else None, + 'emailOptin': 1 if email_optin else None, + 'smsOptout': 1 if sms_optout else None, + 'emailOptout': 1 if email_optout else None, + } + + # Clean up any keys that have a "None" value + payload = { + key: val + for key, val in payload.items() + if val is not None + } + + # Merge in any kwargs + payload.update(kwargs) + + # Turn into a list of items so we can append multiple campaigns + campaign_keys = [('campaigns[]', val) for val in campaigns] + data = [(key, value) for key, value in payload.items()] + campaign_keys + + # Call into the Phone2Action API + response = self.client.post_request('advocates', data=data) + return response['advocateid'] + + def update_advocate(self, + advocate_id, + campaigns=None, + email=None, + phone=None, + sms_optin=None, + email_optin=None, + sms_optout=None, + email_optout=None, + **kwargs): """ Update the fields of an advocate. 
@@ -238,14 +333,47 @@ def update_advocate( **kwargs: Additional fields on the advocate to update """ - return self.capitol_canary.update_advocate( - advocate_id, - campaigns, - email, - phone, - sms_optin, - email_optin, - sms_optout, - email_optout, - **kwargs, - ) + + # Validate the passed in arguments + if (sms_optin or sms_optout) and not (phone and campaigns): + raise ValueError( + 'When opting an advocate in or out of SMS messages, you must specify a valid ' + 'phone and one or more campaigns') + + if (email_optin or email_optout) and not (email and campaigns): + raise ValueError( + 'When opting an advocate in or out of email messages, you must specify a valid ' + 'email address and one or more campaigns') + + # Align our arguments with the expected parameters for the API + payload = { + 'advocateid': advocate_id, + 'campaigns': campaigns, + 'email': email, + 'phone': phone, + 'smsOptin': 1 if sms_optin else None, + 'emailOptin': 1 if email_optin else None, + 'smsOptout': 1 if sms_optout else None, + 'emailOptout': 1 if email_optout else None, + # remap first_name / last_name to be consistent with updated_advocates + 'firstname': kwargs.pop('first_name', None), + 'lastname': kwargs.pop('last_name', None), + } + + # Clean up any keys that have a "None" value + payload = { + key: val + for key, val in payload.items() + if val is not None + } + + # Merge in any kwargs + payload.update(kwargs) + + # Turn into a list of items so we can append multiple campaigns + campaigns = campaigns or [] + campaign_keys = [('campaigns[]', val) for val in campaigns] + data = [(key, value) for key, value in payload.items()] + campaign_keys + + # Call into the Phone2Action API + self.client.post_request('advocates', data=data) diff --git a/parsons/quickbase/__init__.py b/parsons/quickbase/__init__.py index a5e20f4770..37831c12da 100644 --- a/parsons/quickbase/__init__.py +++ b/parsons/quickbase/__init__.py @@ -1,3 +1,5 @@ from parsons.quickbase.quickbase import Quickbase -__all__ = ["Quickbase"] +__all__ = [ + 'Quickbase' +] diff --git a/parsons/quickbase/quickbase.py b/parsons/quickbase/quickbase.py index b710b43f86..e3c0ab11c8 100644 --- a/parsons/quickbase/quickbase.py +++ b/parsons/quickbase/quickbase.py @@ -21,18 +21,13 @@ class Quickbase(object): `Returns:` Quickbase Class """ - def __init__(self, hostname=None, user_token=None): - self.hostname = check_env.check("QUICKBASE_HOSTNAME", hostname) - self.user_token = check_env.check("QUICKBASE_USER_TOKEN", user_token) - self.api_hostname = "https://api.quickbase.com/v1" - self.client = APIConnector( - self.api_hostname, - headers={ - "QB-Realm-Hostname": self.hostname, - "AUTHORIZATION": f"QB-USER-TOKEN {self.user_token}", - }, - ) + self.hostname = check_env.check('QUICKBASE_HOSTNAME', hostname) + self.user_token = check_env.check('QUICKBASE_USER_TOKEN', user_token) + self.api_hostname = 'https://api.quickbase.com/v1' + self.client = APIConnector(self.api_hostname, + headers={'QB-Realm-Hostname': self.hostname, + 'AUTHORIZATION': f'QB-USER-TOKEN {self.user_token}'}) def get_app_tables(self, app_id=None): """ @@ -46,11 +41,9 @@ def get_app_tables(self, app_id=None): `Returns:` Table Class """ - return Table( - self.client.request( - f"{self.api_hostname}/tables?appId={app_id}", "GET" - ).json() - ) + return Table(self.client.request( + f'{self.api_hostname}/tables?appId={app_id}', + 'GET').json()) def query_records(self, table_from=None): """ @@ -64,24 +57,25 @@ def query_records(self, table_from=None): `Returns:` Table Class """ - req_resp = 
self.client.request( - f"{self.api_hostname}/records/query", "POST", json={"from": table_from} - ).json() + req_resp = \ + (self.client.request(f'{self.api_hostname}/records/query', + 'POST', + json={"from": table_from}).json()) - resp_tbl = Table(req_resp["data"]) + resp_tbl = Table(req_resp['data']) cleaned_tbl = Table() for row in resp_tbl: row_dict = {} for column in resp_tbl.columns: - row_dict[column] = row[column]["value"] + row_dict[column] = row[column]['value'] cleaned_tbl.concat(Table([row_dict])) cleaned_tbl.materialize() - column_resp = req_resp["fields"] + column_resp = req_resp['fields'] column_map = {} for entry in column_resp: - column_map[str(entry["id"])] = entry["label"].lower().strip() + column_map[str(entry['id'])] = entry['label'].lower().strip() for column in cleaned_tbl.columns: cleaned_tbl.rename_column(column, column_map[column]) diff --git a/parsons/redash/__init__.py b/parsons/redash/__init__.py index cdf1839e4a..5f88998808 100644 --- a/parsons/redash/__init__.py +++ b/parsons/redash/__init__.py @@ -1,3 +1,5 @@ from parsons.redash.redash import Redash -__all__ = ["Redash"] +__all__ = [ + 'Redash' +] diff --git a/parsons/redash/redash.py b/parsons/redash/redash.py index d68642a781..2f87d0b88b 100644 --- a/parsons/redash/redash.py +++ b/parsons/redash/redash.py @@ -34,118 +34,41 @@ class Redash(object): Redash Class """ - def __init__( - self, - base_url=None, - user_api_key=None, - pause_time=3, - timeout=0, # never timeout - verify=True, - ): - self.base_url = check("REDASH_BASE_URL", base_url) - self.user_api_key = check("REDASH_USER_API_KEY", user_api_key, optional=True) - self.pause = int(check("REDASH_PAUSE_TIME", pause_time, optional=True)) - self.timeout = int(check("REDASH_TIMEOUT", timeout, optional=True)) + def __init__(self, + base_url=None, + user_api_key=None, + pause_time=3, + timeout=0, # never timeout + verify=True): + self.base_url = check('REDASH_BASE_URL', base_url) + self.user_api_key = check('REDASH_USER_API_KEY', user_api_key, optional=True) + self.pause = int(check('REDASH_PAUSE_TIME', pause_time, optional=True)) + self.timeout = int(check('REDASH_TIMEOUT', timeout, optional=True)) self.verify = verify # for https requests self.session = requests.Session() if user_api_key: - self.session.headers.update({"Authorization": f"Key {user_api_key}"}) - - def _catch_runtime_error(self, res): - if res.status_code != 200: - raise RuntimeError( - f"Error. Status code: {res.status_code}. 
Reason: {res.reason}" - ) + self.session.headers.update({'Authorization': f'Key {user_api_key}'}) def _poll_job(self, session, job, query_id): start_secs = time.time() - while job["status"] not in (3, 4): + while job['status'] not in (3, 4): if self.timeout and start_secs + self.timeout < time.time(): - raise RedashTimeout(f"Redash timeout: {self.timeout}") - poll_url = "{}/api/jobs/{}".format(self.base_url, job["id"]) + raise RedashTimeout(f'Redash timeout: {self.timeout}') + poll_url = '{}/api/jobs/{}'.format(self.base_url, job['id']) response = session.get(poll_url, verify=self.verify) response_json = response.json() job = response_json.get( - "job", - { - "status": "Error NO JOB IN RESPONSE: {}".format( - json.dumps(response_json) - ) - }, - ) - logger.debug( - "poll url:%s id:%s status:%s err:%s", - poll_url, - query_id, - job["status"], - job.get("error"), - ) + 'job', + {'status': 'Error NO JOB IN RESPONSE: {}'.format(json.dumps(response_json))}) + logger.debug("poll url:%s id:%s status:%s err:%s", + poll_url, query_id, job['status'], job.get('error')) time.sleep(self.pause) - if job["status"] == 3: # 3 = completed - return job["query_result_id"] - elif job["status"] == 4: # 3 = ERROR - raise RedashQueryFailed( - "Redash Query {} failed: {}".format(query_id, job["error"]) - ) - - def get_data_source(self, data_source_id): - """ - Get a data source. - - `Args:` - data_source_id: int or str - ID of data source. - `Returns`: - Data source json object - """ - res = self.session.get(f"{self.base_url}/api/data_sources/{data_source_id}") - self._catch_runtime_error(res) - return res.json() - - def update_data_source( - self, data_source_id, name, type, dbName, host, password, port, user - ): - """ - Update a data source. - - `Args:` - data_source_id: str or int - ID of data source. - name: str - Name of data source. - type: str - Type of data source. - dbname: str - Database name of data source. - host: str - Host of data source. - password: str - Password of data source. - port: int or str - Port of data source. - user: str - Username of data source. 
- `Returns:` - ``None`` - """ - self._catch_runtime_error( - self.session.post( - f"{self.base_url}/api/data_sources/{data_source_id}", - json={ - "name": name, - "type": type, - "options": { - "dbname": dbName, - "host": host, - "password": password, - "port": port, - "user": user, - }, - }, - ) - ) + if job['status'] == 3: # 3 = completed + return job['query_result_id'] + elif job['status'] == 4: # 4 = ERROR + raise RedashQueryFailed('Redash Query {} failed: {}'.format(query_id, job['error'])) def get_fresh_query_results(self, query_id=None, params=None): """ @@ -168,40 +91,30 @@ def get_fresh_query_results(self, query_id=None, params=None): `Returns:` Table Class """ - query_id = check("REDASH_QUERY_ID", query_id, optional=True) - params_from_env = check("REDASH_QUERY_PARAMS", "", optional=True) - redash_params = ( - {"p_%s" % k: str(v).replace("'", "''") for k, v in params.items()} - if params - else {} - ) + query_id = check('REDASH_QUERY_ID', query_id, optional=True) + params_from_env = check('REDASH_QUERY_PARAMS', '', optional=True) + redash_params = ({'p_%s' % k: str(v).replace("'", "''") for k, v in params.items()} + if params else {}) response = self.session.post( - f"{self.base_url}/api/queries/{query_id}/refresh?{params_from_env}", + f'{self.base_url}/api/queries/{query_id}/refresh?{params_from_env}', params=redash_params, - verify=self.verify, - ) + verify=self.verify) if response.status_code != 200: - raise RedashQueryFailed( - f"Refresh failed for query {query_id}. {response.text}" - ) + raise RedashQueryFailed(f'Refresh failed for query {query_id}. {response.text}') - job = response.json()["job"] + job = response.json()['job'] result_id = self._poll_job(self.session, job, query_id) if result_id: response = self.session.get( - f"{self.base_url}/api/queries/{query_id}/results/{result_id}.csv", - verify=self.verify, - ) + f'{self.base_url}/api/queries/{query_id}/results/{result_id}.csv', + verify=self.verify) if response.status_code != 200: raise RedashQueryFailed( - f"Failed getting results for query {query_id}. {response.text}" - ) + f'Failed getting results for query {query_id}. {response.text}') else: - raise RedashQueryFailed( - f"Failed getting result {query_id}. {response.text}" - ) + raise RedashQueryFailed(f'Failed getting result {query_id}. {response.text}') return Table.from_csv_string(response.text) def get_cached_query_results(self, query_id=None, query_api_key=None): @@ -218,20 +131,16 @@ def get_cached_query_results(self, query_id=None, query_api_key=None): `Returns:` Table Class """ - query_id = check("REDASH_QUERY_ID", query_id) - query_api_key = check("REDASH_QUERY_API_KEY", query_api_key, optional=True) + query_id = check('REDASH_QUERY_ID', query_id) + query_api_key = check('REDASH_QUERY_API_KEY', query_api_key, optional=True) params = {} if not self.user_api_key and query_api_key: - params["api_key"] = query_api_key - response = self.session.get( - f"{self.base_url}/api/queries/{query_id}/results.csv", - params=params, - verify=self.verify, - ) + params['api_key'] = query_api_key + response = self.session.get(f'{self.base_url}/api/queries/{query_id}/results.csv', + params=params, + verify=self.verify) if response.status_code != 200: - raise RedashQueryFailed( - f"Failed getting results for query {query_id}. {response.text}" - ) + raise RedashQueryFailed(f'Failed getting results for query {query_id}.
{response.text}') return Table.from_csv_string(response.text) @classmethod @@ -265,17 +174,11 @@ def load_to_table(cls, refresh=True, **kwargs): `Returns:` Table Class """ - initargs = { - a: kwargs.get(a) - for a in ("base_url", "user_api_key", "pause_time", "timeout", "verify") - if a in kwargs - } + initargs = {a: kwargs.get(a) + for a in ('base_url', 'user_api_key', 'pause_time', 'timeout', 'verify') + if a in kwargs} obj = cls(**initargs) - if not refresh or kwargs.get("query_api_key"): - return obj.get_cached_query_results( - kwargs.get("query_id"), kwargs.get("query_api_key") - ) + if not refresh or kwargs.get('query_api_key'): + return obj.get_cached_query_results(kwargs.get('query_id'), kwargs.get('query_api_key')) else: - return obj.get_fresh_query_results( - kwargs.get("query_id"), kwargs.get("params") - ) + return obj.get_fresh_query_results(kwargs.get('query_id'), kwargs.get('params')) diff --git a/parsons/rockthevote/rtv.py b/parsons/rockthevote/rtv.py index 3662da22ca..a35877802d 100644 --- a/parsons/rockthevote/rtv.py +++ b/parsons/rockthevote/rtv.py @@ -18,7 +18,7 @@ TESTING_URI = "https://staging.rocky.rockthevote.com/api/v4" PRODUCTION_URI = "https://register.rockthevote.com/api/v4" -DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S UTC" +DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S UTC' """Datetime format for sending date's to the API.""" REQUEST_HEADERS = { @@ -27,11 +27,11 @@ # though it seems fine with the curl user agent # For more info on user agents, see: # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent - "user-agent": "curl/7.54.0" + 'user-agent': 'curl/7.54.0' } """Standard request header for sending requests to the API.""" -STATUS_URL_PARSE_REGEX = re.compile(r"(\d+)$") +STATUS_URL_PARSE_REGEX = re.compile(r'(\d+)$') """Regex for parsing the report ID from the status URL.""" @@ -57,8 +57,8 @@ class RockTheVote: """ def __init__(self, partner_id=None, partner_api_key=None, testing=False): - self.partner_id = check_env.check("RTV_PARTNER_ID", partner_id) - self.partner_api_key = check_env.check("RTV_PARTNER_API_KEY", partner_api_key) + self.partner_id = check_env.check('RTV_PARTNER_ID', partner_id) + self.partner_api_key = check_env.check('RTV_PARTNER_API_KEY', partner_api_key) if testing: self.client = APIConnector(TESTING_URI, headers=REQUEST_HEADERS) @@ -83,11 +83,11 @@ def create_registration_report(self, before=None, since=None, report_type=None): int The ID of the created report. """ - report_url = "registrant_reports.json" + report_url = 'registrant_reports.json' # Create the report for the new data report_parameters = { - "partner_id": self.partner_id, - "partner_API_key": self.partner_api_key, + 'partner_id': self.partner_id, + 'partner_API_key': self.partner_api_key, } # Declare these here so the logging doesn't error out @@ -95,38 +95,35 @@ def create_registration_report(self, before=None, since=None, report_type=None): if report_type: if report_type not in VALID_REPORT_TYPES: - raise RTVFailure( - f"Invalid report type. Must be one of {VALID_REPORT_TYPES}" - ) + raise RTVFailure(f"Invalid report type. 
Must be one of {VALID_REPORT_TYPES}") report_parameters["report_type"] = report_type if since: since_date = parse_date(since).strftime(DATETIME_FORMAT) - report_parameters["since"] = since_date + report_parameters['since'] = since_date if before: before_date = parse_date(before).strftime(DATETIME_FORMAT) - report_parameters["before"] = before_date + report_parameters['before'] = before_date # The report parameters get passed into the request as JSON in the body # of the request. report_str = f"{report_type} report" if report_type else "report" logger.info( f"Creating {report_str} for {self.partner_id} " - f"for dates: {since_date} to {before_date}..." - ) - response = self.client.request(report_url, "post", json=report_parameters) + f"for dates: {since_date} to {before_date}...") + response = self.client.request(report_url, 'post', json=report_parameters) if response.status_code != requests.codes.ok: raise RTVFailure("Couldn't create RTV registrations report") response_json = response.json() # The RTV API says the response should include the report_id, but I have not found # that to be the case - report_id = response_json.get("report_id") + report_id = response_json.get('report_id') if report_id: logger.info(f"Created report with id {report_id}.") return report_id # If the response didn't include the report_id, then we will parse it out of the URL. - status_url = response_json.get("status_url") + status_url = response_json.get('status_url') url_match = STATUS_URL_PARSE_REGEX.search(status_url) if url_match: report_id = url_match.group(1) @@ -134,13 +131,8 @@ def create_registration_report(self, before=None, since=None, report_type=None): logger.info(f"Created report with id {report_id}.") return report_id - def get_registration_report( - self, - report_id, - block=False, - poll_interval_seconds=60, - report_timeout_seconds=3600, - ): + def get_registration_report(self, report_id, block=False, poll_interval_seconds=60, + report_timeout_seconds=3600): """ Get data from an existing registration report. @@ -159,35 +151,32 @@ def get_registration_report( """ logger.info(f"Getting report with id {report_id}...") credentials = { - "partner_id": self.partner_id, - "partner_API_key": self.partner_api_key, + 'partner_id': self.partner_id, + 'partner_API_key': self.partner_api_key, } - status_url = f"registrant_reports/{report_id}" + status_url = f'registrant_reports/{report_id}' download_url = None # Let's figure out at what time should we just give up because we waited # too long - end_time = datetime.datetime.now() + datetime.timedelta( - seconds=report_timeout_seconds - ) + end_time = datetime.datetime.now() + datetime.timedelta(seconds=report_timeout_seconds) # If we have a download URL, we can move on and just download the # report. Otherwise, as long as we haven't run out of time, we will # check the status. while not download_url and datetime.datetime.now() < end_time: logger.debug( - f"Registrations report not ready yet, sleeping {poll_interval_seconds} seconds" - ) + f'Registrations report not ready yet, sleeping {poll_interval_seconds} seconds') # Check the status again via the status endpoint - status_response = self.client.request(status_url, "get", params=credentials) + status_response = self.client.request(status_url, 'get', params=credentials) # Check to make sure the call got a valid response if status_response.status_code == requests.codes.ok: status_json = status_response.json() # Grab the download_url from the response. 
- download_url = status_json.get("download_url") + download_url = status_json.get('download_url') if not download_url and not block: return None @@ -202,10 +191,10 @@ def get_registration_report( # If we never got a valid download_url, then we timed out waiting for # the report to generate. We will log an error and exit. if not download_url: - raise RTVFailure("Timed out waiting for report") + raise RTVFailure('Timed out waiting for report') # Download the report data - download_response = self.client.request(download_url, "get", params=credentials) + download_response = self.client.request(download_url, 'get', params=credentials) # Check to make sure the call got a valid response if download_response.status_code == requests.codes.ok: @@ -217,24 +206,20 @@ def get_registration_report( # Transform the data from the report's CSV format to something more # Pythonic (snake case) normalized_column_names = [ - re.sub(r"\s", "_", name).lower() for name in table.columns + re.sub(r'\s', '_', name).lower() + for name in table.columns ] normalized_column_names = [ - re.sub(r"[^A-Za-z\d_]", "", name) for name in normalized_column_names + re.sub(r'[^A-Za-z\d_]', '', name) + for name in normalized_column_names ] table.table = petl.setheader(table.table, normalized_column_names) return table else: - raise RTVFailure("Unable to download report data") - - def run_registration_report( - self, - before=None, - since=None, - report_type=None, - poll_interval_seconds=60, - report_timeout_seconds=3600, - ): + raise RTVFailure('Unable to download report data') + + def run_registration_report(self, before=None, since=None, report_type=None, + poll_interval_seconds=60, report_timeout_seconds=3600): """ Run a new registration report. @@ -262,21 +247,15 @@ def run_registration_report( report_str = f"{report_type} report" if report_type else "report" logger.info( f"Running {report_str} for {self.partner_id} " - f"for dates: {since} to {before}..." - ) + f"for dates: {since} to {before}...") report_id = self.create_registration_report( - before=before, since=since, report_type=report_type - ) - return self.get_registration_report( - report_id, - block=True, - poll_interval_seconds=poll_interval_seconds, - report_timeout_seconds=report_timeout_seconds, - ) - - def get_state_requirements( - self, lang, home_state_id, home_zip_code, date_of_birth=None, callback=None - ): + before=before, since=since, report_type=report_type) + return self.get_registration_report(report_id, block=True, + poll_interval_seconds=poll_interval_seconds, + report_timeout_seconds=report_timeout_seconds) + + def get_state_requirements(self, lang, home_state_id, home_zip_code, + date_of_birth=None, callback=None): """ Checks state eligibility and provides state specific fields information. 
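As a quick orientation before the argument details that follow, here is a minimal usage sketch; the language, state, and zip values are hypothetical, and the partner credentials are read from the RTV_PARTNER_ID / RTV_PARTNER_API_KEY environment variables per __init__ above:

    from parsons.rockthevote.rtv import RockTheVote

    rtv = RockTheVote()  # partner_id / partner_api_key pulled from the environment
    # Ask for Georgia's registration requirements in English (zip is invented):
    requirements = rtv.get_state_requirements("en", "GA", "30301")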
Args: @@ -294,25 +273,23 @@ def get_state_requirements( Parsons.Table A single row table with the response json """ - requirements_url = "state_requirements.json" + requirements_url = 'state_requirements.json' logger.info(f"Getting the requirements for {home_state_id}...") params = { - "lang": lang, - "home_state_id": home_state_id, - "home_zip_code": home_zip_code, + 'lang': lang, + 'home_state_id': home_state_id, + 'home_zip_code': home_zip_code } if date_of_birth: - params["date_of_birth"] = date_of_birth + params['date_of_birth'] = date_of_birth if callback: - params["callback"] = callback + params['callback'] = callback - requirements_response = self.client.request( - requirements_url, "get", params=params - ) + requirements_response = self.client.request(requirements_url, 'get', params=params) if requirements_response.status_code == requests.codes.ok: response_json = requirements_response.json() @@ -320,5 +297,5 @@ def get_state_requirements( return table else: error_json = requirements_response.json() - logger.info(f"{error_json}") + logger.info(f'{error_json}') raise RTVFailure("Could not retrieve state requirements") diff --git a/parsons/salesforce/__init__.py b/parsons/salesforce/__init__.py index a06fbe9d1d..dbf61fc1d0 100644 --- a/parsons/salesforce/__init__.py +++ b/parsons/salesforce/__init__.py @@ -1,3 +1,5 @@ from parsons.salesforce.salesforce import Salesforce -__all__ = ["Salesforce"] +__all__ = [ + 'Salesforce' +] diff --git a/parsons/salesforce/salesforce.py b/parsons/salesforce/salesforce.py index de57b3fcc4..cea5292335 100644 --- a/parsons/salesforce/salesforce.py +++ b/parsons/salesforce/salesforce.py @@ -1,5 +1,6 @@ from simple_salesforce import Salesforce as _Salesforce from parsons.utilities import check_env +from parsons.etl import Table import logging import json @@ -28,18 +29,14 @@ class Salesforce: Salesforce class """ - def __init__( - self, username=None, password=None, security_token=None, test_environment=False - ): + def __init__(self, username=None, password=None, security_token=None, test_environment=False): - self.username = check_env.check("SALESFORCE_USERNAME", username) - self.password = check_env.check("SALESFORCE_PASSWORD", password) - self.security_token = check_env.check( - "SALESFORCE_SECURITY_TOKEN", security_token - ) + self.username = check_env.check('SALESFORCE_USERNAME', username) + self.password = check_env.check('SALESFORCE_PASSWORD', password) + self.security_token = check_env.check('SALESFORCE_SECURITY_TOKEN', security_token) if test_environment: - self.domain = check_env.check("SALESFORCE_DOMAIN", "test") + self.domain = check_env.check('SALESFORCE_DOMAIN', 'test') else: self.domain = None @@ -67,7 +64,7 @@ def describe_fields(self, object): Dict of all the object's field meta data in Salesforce """ - return json.loads(json.dumps(getattr(self.client, object).describe()["fields"])) + return json.loads(json.dumps(getattr(self.client, object).describe()['fields'])) def query(self, soql): """ @@ -77,11 +74,10 @@ def query(self, soql): For reference, see the `Salesforce SOQL documentation `_. 
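For context, a minimal sketch of calling this method as changed in this hunk; the SOQL string is illustrative, and credentials come from the SALESFORCE_* environment variables:

    from parsons.salesforce.salesforce import Salesforce

    sf = Salesforce()  # username / password / security token read from the environment
    # After this change, query() wraps the response in a Parsons Table
    # instead of returning the raw dict:
    contacts = sf.query("SELECT Id, Email FROM Contact LIMIT 10")
    print(contacts.num_rows)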
`Returns:` list of dicts with Salesforce data - """ # noqa: E501,E261 + """ # noqa: E501,E261 - q = self.client.query_all(soql) - q = json.loads(json.dumps(q)) - logger.info(f"Found {q['totalSize']} results") + q = Table(self.client.query_all(soql)) + logger.info(f'Found {q.num_rows} results') return q def insert_record(self, object, data_table): @@ -105,9 +101,9 @@ def insert_record(self, object, data_table): """ r = getattr(self.client.bulk, object).insert(data_table.to_dicts()) - s = [x for x in r if x.get("success") is True] + s = [x for x in r if x.get('success') is True] logger.info( - f"Successfully inserted {len(s)} out of {data_table.num_rows} records to {object}" + f'Successfully inserted {len(s)} out of {data_table.num_rows} records to {object}' ) return r @@ -132,9 +128,9 @@ def update_record(self, object, data_table): """ r = getattr(self.client.bulk, object).update(data_table.to_dicts()) - s = [x for x in r if x.get("success") is True] + s = [x for x in r if x.get('success') is True] logger.info( - f"Successfully updated {len(s)} out of {data_table.num_rows} records in {object}" + f'Successfully updated {len(s)} out of {data_table.num_rows} records in {object}' ) return r @@ -162,9 +158,9 @@ def upsert_record(self, object, data_table, id_col): """ r = getattr(self.client.bulk, object).upsert(data_table.to_dicts(), id_col) - s = [x for x in r if x.get("success") is True] + s = [x for x in r if x.get('success') is True] logger.info( - f"Successfully upserted {len(s)} out of {data_table.num_rows} records to {object}" + f'Successfully upserted {len(s)} out of {data_table.num_rows} records to {object}' ) return r @@ -194,9 +190,9 @@ def delete_record(self, object, id_table, hard_delete=False): else: r = getattr(self.client.bulk, object).delete(id_table.to_dicts()) - s = [x for x in r if x.get("success") is True] + s = [x for x in r if x.get('success') is True] logger.info( - f"Successfully deleted {len(s)} out of {id_table.num_rows} records from {object}" + f'Successfully deleted {len(s)} out of {id_table.num_rows} records from {object}' ) return r @@ -215,7 +211,7 @@ def client(self): username=self.username, password=self.password, security_token=self.security_token, - domain=self.domain, + domain=self.domain ) return self._client diff --git a/parsons/scytl/__init__.py b/parsons/scytl/__init__.py deleted file mode 100644 index 79932da6a8..0000000000 --- a/parsons/scytl/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from parsons.scytl.scytl import Scytl - -__all__ = ["Scytl"] diff --git a/parsons/scytl/scytl.py b/parsons/scytl/scytl.py deleted file mode 100644 index 8873e0eb6d..0000000000 --- a/parsons/scytl/scytl.py +++ /dev/null @@ -1,696 +0,0 @@ -import zipfile -import csv -import requests -import xml.etree.ElementTree as ET -import typing as t -from datetime import datetime -from dateutil.parser import parse as parsedate -from pytz import timezone -from io import BytesIO, StringIO -from dataclasses import dataclass - -CLARITY_URL = "https://results.enr.clarityelections.com/" - -CURRENT_VERSION_URL_TEMPLATE = ( - CLARITY_URL + "{administrator}/{election_id}/current_ver.txt" -) -SUMMARY_CSV_ZIP_URL_TEMPLATE = ( - CLARITY_URL + "{administrator}/{election_id}/{version_num}/reports/summary.zip" -) -DETAIL_XML_ZIP_URL_TEMPLATE = ( - CLARITY_URL + "{administrator}/{election_id}/{version_num}/reports/detailxml.zip" -) -COUNTY_DETAIL_XML_ZIP_URL_TEMPLATE = ( - CLARITY_URL - + "{state}/{county_name}/{county_election_id}/{county_version_num}/reports/detailxml.zip" -) 
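A small sketch of how these Clarity URL templates from the module being deleted expand in practice; the administrator and election id values are the illustrative ones from the class docstring below:

    CLARITY_URL = "https://results.enr.clarityelections.com/"
    CURRENT_VERSION_URL_TEMPLATE = (
        CLARITY_URL + "{administrator}/{election_id}/current_ver.txt"
    )

    # County-level administrators take the form "<state>/<county>":
    url = CURRENT_VERSION_URL_TEMPLATE.format(administrator="GA/Clarke", election_id="114729")
    # -> https://results.enr.clarityelections.com/GA/Clarke/114729/current_ver.txt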
-ELECTION_SETTINGS_JSON_URL_TEMPLATE = ( - CLARITY_URL + "{state}/{election_id}/{version_num}/json/en/electionsettings.json" -) - -BROWSER_HEADERS = { - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) " - + "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36" -} - -TZ_INFO = { - "EST": "UTC-5", - "EDT": "UTC-4", - "CST": "UTC-6", - "CDT": "UTC-5", - "MST": "UTC-7", - "MDT": "UTC-6", - "PST": "UTC-8", - "PDT": "UTC-7", - "AKST": "UTC-9", - "AKDT": "UTC-8", - "HST": "UTC-10", - "HDT": "UTC-9", -} - - -@dataclass -class CountyDetails: - """ - A class for keeping track of County election details. - - A dataclass is decorator that adds special functions including an - automatic __init__ function. See more here: https://docs.python.org/3/library/dataclasses.html - """ - - state: str - county_name: str - county_election_id: str - county_version_num: str - county_update_date: datetime = None - - -class Scytl: - """ - Instantiate a Scytl connector. - - `Args:`: - state: str - The two letter code of the state the publishing election results. - ex: GA - election_id: str - The numeric identifier for the election found in the url of the election's website. - ex: "114729" - county: str (optional) - The name of the county publishing the results. - ex: Clarke - """ - - def __init__(self, state: str, election_id: str, county=""): - self.state = state - self.county = county.replace(" ", "_") - - self.administrator = ( - f"{self.state}/{self.county}" if self.county else self.state - ) - self.election_id = election_id - - self.previous_summary_version_num = None - self.previous_details_version_num = None - self.previous_county_details_version_num = None - self.previous_county_details_list = None - self.previously_fetched_counties = set([]) - - def _parse_date_to_utc(self, input_dt: str) -> datetime: - """ - Parse datetime string as datetime in UTC - - `Args`: - input_dt: str - The datetime string to be parsed - `Returns`: - datetime | None - """ - - if input_dt is None: - return - - temp = parsedate(input_dt, tzinfos=TZ_INFO) - temp = temp.astimezone(timezone("UTC")) - - return temp - - def _get_version(self, administrator: str, election_id: str) -> str: - """ - Fetch the latest version of the election results from the Clarity site - - `Args`: - administrator: str - The url code for the election administrator, either the two-letter - state code or the state code and the county, separated by a slash - election_id: str - The election id for the given election as a string - `Returns`: - str - The version id as a string - """ - - config_version_url = CURRENT_VERSION_URL_TEMPLATE.format( - administrator=administrator, election_id=election_id - ) - - res = requests.get(config_version_url, headers=BROWSER_HEADERS) - - return res.text - - def _parse_file_from_zip_url(self, zipfile_url: str, file_name: str) -> bytes: - """ - Fetch a zip file from the given url and unzip to a byte array - - `Args`: - zipfile_url: str - The url where the zip file can be found - election_id: str - The expected name of the file in the zipfile to read - `Returns`: - bytes - The unzipped file as bytes - """ - - with BytesIO() as zipdata: - with requests.get(zipfile_url, headers=BROWSER_HEADERS) as res: - zipdata.write(res.content) - zipdata.flush() - - zf = zipfile.ZipFile(zipdata) - - with zf.open(file_name) as input: - return input.read() - - def _get_latest_counties_scytl_info( - self, state: str, election_id: str, version_num: str - ) -> t.Dict[str, CountyDetails]: - """ - Fetch the settings JSON 
file for the election and parse the county details - for participating counties in a state election. - - `Args`: - state: str - The two-letter state code for the state - election_id: str - The election ID for the given election - version_num: str - The latest version ID of the election as a string - `Returns`: - dict[str, CountyDetails] - A dictionary mapping county names to their sub-election information - """ - - county_dict = {} - - config_settings_json_url = ELECTION_SETTINGS_JSON_URL_TEMPLATE.format( - state=state, election_id=election_id, version_num=version_num - ) - - settings_json_res = requests.get( - config_settings_json_url, headers=BROWSER_HEADERS - ) - settings_json = settings_json_res.json() - - participating_counties = settings_json["settings"]["electiondetails"][ - "participatingcounties" - ] - - for county_row in participating_counties: - county_info = county_row.split("|") - source_county_name = county_info[0] - county_election_id = county_info[1] - county_version_num = county_info[2] - county_update_date = self._parse_date_to_utc(county_info[3]) - - county_details = CountyDetails( - state, - source_county_name, - county_election_id, - county_version_num, - county_update_date, - ) - - county_dict[source_county_name] = county_details - - return county_dict - - def _parse_county_xml_data_to_precincts( - self, county_data: bytes, county_details: CountyDetails - ) -> t.List[t.Dict]: - """ - Parse a detail XML file for a county into a list of election - results by precinct and vote method. - - `Args`: - county_data: bytes - The detail XML file for a county as bytes - county_details: str - The details class for the county, including name, - id, and last updated datetime - `Returns`: - list[dict] - The list of election results by precinct and vote method in the file. 
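For orientation, a toy example of the detail XML shape this parser walks. The Contest/VoteType nesting and the attribute names come from the parsing code below; the Choice and Precinct tag names are guesses (the code only inspects attributes at those levels), and the sample values are invented:

    import xml.etree.ElementTree as ET

    sample = (
        '<ElectionResult>'
        '<Contest text="US Senate">'
        '<Choice text="Candidate A" party="DEM">'
        '<VoteType name="Election Day">'
        '<Precinct name="Precinct 1" votes="120"/>'
        '</VoteType>'
        '</Choice>'
        '</Contest>'
        '</ElectionResult>'
    )
    root = ET.fromstring(sample)
    contest = root[0]
    print(contest.attrib["text"])  # US Senate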
- """ - - tree = ET.fromstring(county_data) - - precinct_dict = {} - precinct_votes = [] - - root = tree - - for child in root: - - if child.tag == "VoterTurnout": - precincts = child[0] - - for precinct in precincts: - data = precinct.attrib - name = data.get("name") - - precinct_info = { - "total_voters": data.get("totalVoters"), - "ballots_cast": data.get("ballotsCast"), - "voter_turnout": data.get("voterTurnout"), - "percent_reporting": data.get("percentReporting"), - } - - precinct_dict[name] = precinct_info - - if child.tag == "Contest": - - office = child.attrib["text"] - - for choice in child: - cand_votes = {} - - if choice.tag == "VoteType": - continue - - source_cand_data = choice.attrib - cand_name = source_cand_data.get("text") - cand_party = source_cand_data.get("party") - - for vote_type in choice: - vote_type_label = vote_type.attrib["name"] - - for precinct in vote_type: - precinct_name = precinct.attrib["name"] - cand_votes[precinct_name] = int(precinct.attrib["votes"]) - - precinct_turnout = precinct_dict.get(precinct_name, {}) - - result = { - "state": county_details.state, - "county_name": county_details.county_name, - "county_id": county_details.county_election_id, - "office": office, - "ballots_cast": precinct_turnout.get("ballots_cast"), - "reg_voters": precinct_turnout.get("total_voters"), - "vote_method": vote_type_label, - "candidate_name": cand_name, - "candidate_party": cand_party, - "precinct_name": precinct_name, - "recorded_votes": cand_votes[precinct_name], - "voter_turnout": precinct_turnout.get("voter_turnout"), - "percent_reporting": precinct_turnout.get( - "percent_reporting" - ), - "timestamp_last_updated": county_details.county_update_date, - } - - precinct_votes.append(result) - - return precinct_votes - - def _parse_state_xml_data_to_counties( - self, state_data: bytes, state: str - ) -> t.List[t.Dict]: - """ - Parse a detail XML file for a state into a list of election - results by county and vote method. - - `Args`: - state_data: bytes - The detail XML file for a state as bytes - state: str - The two-letter state code for the state associated with the file - `Returns`: - list[dict] - The list of election results by state and vote method in the file. 
- """ - - root = ET.fromstring(state_data) - - county_dict = {} - county_votes = [] - - timestamp = None - - for child in root: - - if ( - child.tag == "Timestamp" - ): # 1/5/2021 3:22:30 PM EST - timestamp = self._parse_date_to_utc(child.text) - - if child.tag == "ElectionVoterTurnout": - counties = child[0] - - for county in counties: - data = county.attrib - name = data["name"] - - county_dict[name] = data - - if child.tag == "Contest": - - office = child.attrib["text"] - - for choice in child: - cand_votes = {} - - if choice.tag == "ParticipatingCounties": - continue - - source_cand_data = choice.attrib - cand_name = source_cand_data.get("text") - cand_party = source_cand_data.get("party") - - for vote_type in choice: - vote_type_label = vote_type.attrib["name"] - - for county in vote_type: - county_name = county.attrib["name"] - cand_votes[county_name] = int(county.attrib["votes"]) - - county_turnout = county_dict.get(county_name, {}) - - result = { - "state": state, - "county_name": county_name, - "office": office, - "ballots_cast": county_turnout.get("ballotsCast"), - "reg_voters": county_turnout.get("totalVoters"), - "precincts_reporting": county_turnout.get( - "precinctsReported" - ), - "total_precincts": county_turnout.get( - "precinctsParticipating" - ), - "vote_method": vote_type_label, - "candidate_name": cand_name, - "candidate_party": cand_party, - "recorded_votes": cand_votes[county_name], - "timestamp_last_updated": timestamp, - } - - county_votes.append(result) - - return county_votes - - def _fetch_and_parse_summary_results( - self, administrator: str, election_id: str, version_num: str, county="" - ) -> t.List[t.Dict]: - """ - Fetches the summary results CSV file from the Scytl site and parses it - into a list of election results by candidate. - - `Args`: - administrator: str - The url code for the election administrator, either the two-letter - state code or the state code and the county, separated by a slash - election_id: str - The election id for the given election as a string - version_num: str - The latest version ID of the election as a string - county: str - The name of the county associated with the summary file - `Returns`: - list[dict] - The list of election results by candidate. - """ - - summary_csv_zip_url = SUMMARY_CSV_ZIP_URL_TEMPLATE.format( - administrator=administrator, - election_id=election_id, - version_num=version_num, - ) - - zip_bytes = self._parse_file_from_zip_url(summary_csv_zip_url, "summary.csv") - - string_buffer = StringIO(zip_bytes.decode("latin-1")) - csv_data = csv.DictReader(string_buffer, delimiter=",") - - data = [ - { - "state": self.state, - "county_name": county or self.county, - "office": x.get("contest name"), - "ballots_cast": x.get("ballots cast"), - "reg_voters": x.get("registered voters"), - "counties_reporting": x.get("num Area rptg"), - "total_counties": x.get("num Area total"), - "precincts_reporting": x.get("num Precinct rptg"), - "total_precincts": x.get("num Precinct total"), - "candidate_name": x.get("choice name"), - "candidate_party": x.get("party name"), - "recorded_votes": x.get("total votes"), - } - for x in csv_data - ] - - return data - - def get_summary_results(self, force_update=False) -> t.List[t.Dict]: - """ - Fetch the latest summary results for the given election, across all contests. - - Please note that all electoral entities administer their elections differently, - so not all values will be populated if the entity doesn't provide them. 
- - `Args:` - force_update: bool - If this is False, the connector will check to see if the current version - matches the previously fetched version of the results. - If the version has not been changed, no results will be fetched or returned. - Default: false - `Returns:` - list[dict] - The list should contain entries for each candidate in each office. - Each row will contain the following: - - state - - county_name (if applicable) - - office - - ballots_cast (in the contest) - - reg_voters (eligible for the contest) - - counties_reporting - - total_counties - - precincts_reporting - - total_precincts - - candidate_name - - candidate_party (many administrators do not use this feature - and instead include the party in the candidate name) - - recorded_votes (votes cast for the candidate) - """ - - version_num = self._get_version(self.administrator, self.election_id) - - if not force_update and version_num == self.previous_summary_version_num: - return - - data = self._fetch_and_parse_summary_results( - self.administrator, self.election_id, version_num - ) - - self.previous_summary_version_num = version_num - - return data - - def get_detailed_results(self, force_update=False) -> t.List[t.Dict]: - """ - Fetch the latest detailed results by geography for the given election, across all contests. - - Please note that all electoral entities administer their elections differently, - so not all values will be populated if the entity doesn't provide them. - - `Args:` - force_update: bool - If this is False, the connector will check to see if the current version - matches the previously fetched version of the results. - If the version has not been changed, no results will be fetched or returned. - Default: false - `Returns:` - list[dict] - The list should contain entries for each candidate in each office, - per vote method and per county. 
- - If fetching for a state, results will look like: - - state - - county_name - - office - - ballots_cast - - reg_voters - - precincts_reporting - - total_precincts - - vote_method (note: some administrators choose to differentiate - results by vote method, while others do not) - - candidate_name - - candidate_party (many administrators do not use this - feature and instead include the party in the candidate name) - - recorded_votes (votes cast for the candidate - with this vote method in this county) - - timestamp_last_updated - - If fetching for a county, results will look like: - - state - - county_name - - county_id - - office - - ballots_cast - - reg_voters - - vote_method (note: some administrators choose to - differentiate results by vote method, while others do not) - - candidate_name - - candidate_party (many administrators do not use this - feature and instead include the party in the candidate name) - - precinct_name - - recorded_votes (votes cast for the candidate - with this vote method in this county) - - voter_turnout - - percent_reporting - - timestamp_last_updated - """ - - version_num = self._get_version(self.administrator, self.election_id) - - if not force_update and version_num == self.previous_details_version_num: - return - - detail_xml_url = DETAIL_XML_ZIP_URL_TEMPLATE.format( - administrator=self.administrator, - election_id=self.election_id, - version_num=version_num, - ) - - parsed_data = [] - - county_data = self._parse_file_from_zip_url(detail_xml_url, "detail.xml") - - if self.county: - county_details = CountyDetails( - self.state, self.county, self.election_id, version_num - ) - - parsed_data = self._parse_county_xml_data_to_precincts( - county_data, county_details - ) - else: - parsed_data = self._parse_state_xml_data_to_counties( - county_data, self.state - ) - - self.previous_details_version_num = version_num - - return parsed_data - - def get_detailed_results_for_participating_counties( - self, county_names: t.List[str] = None, force_update=False - ) -> t.Tuple[t.List[str], t.List[t.Dict]]: - """ - Fetch the latest detailed results for the given election for all participating counties - with detailed results, across all contests. - - Some counties may not have detailed results. If so, this will attempt - to fetch the summary results for that county. If no results exist for either, - the county name will be appended to the missing_counties list. - - After the first fetch, only the counties with updates will be returned, - previous results will not be included. - - Please note that all electoral entities administer their elections differently, - so not all values will be populated if the entity doesn't provide them. - - `Args:` - county_names: list[str] - The list of counties to get precinct-level results for. - Default: None (get all counties) - force_update: bool - If this is False, the connector will check to see if the current - version matches the previously fetched version of the results. - If the version has not been changed, no results will be fetched or returned. - Default: false - - `Returns:` - list[str] - The list of county names that could not be fetched - - list[dict] - The list should contain entries for each candidate in - each office, per vote method, county, and precinct. 
- Each row will contain the following: - - state - - county_name - - county_id - - office - - ballots_cast - - reg_voters - - vote_method (note: some administrators choose to differentiate - results by vote method, while others do not) - - candidate_name - - candidate_party (many administrators do not use this feature - and instead include the party in the candidate name) - - precinct_name - - recorded_votes (votes cast for the candidate with this vote method in this county) - - voter_turnout - - percent_reporting - - timestamp_last_updated - """ - - version_num = self._get_version(self.administrator, self.election_id) - - if not force_update and version_num == self.previous_county_details_version_num: - return [], [] - - county_details_list = self._get_latest_counties_scytl_info( - self.state, self.election_id, version_num - ) - - parsed_data = [] - fetched_counties = [] - missing_counties = [] - - for county_name, county_details in county_details_list.items(): - if county_names and county_name not in county_names: - continue - - if ( - not force_update - and county_name in self.previously_fetched_counties - and self.previous_county_details_list - and county_details.county_update_date - <= self.previous_county_details_list[county_name].county_update_date - ): - continue - - detail_xml_url = COUNTY_DETAIL_XML_ZIP_URL_TEMPLATE.format( - state=county_details.state, - county_name=county_details.county_name, - county_election_id=county_details.county_election_id, - county_version_num=county_details.county_version_num, - ) - - try: - county_data = self._parse_file_from_zip_url( - detail_xml_url, "detail.xml" - ) - - except requests.exceptions.RequestException: - try: - summary_data = self._fetch_and_parse_summary_results( - f"{self.state}/{county_name}", - county_details.county_election_id, - county_details.county_version_num, - county_name, - ) - - except requests.exceptions.RequestException: - missing_counties.append(county_name) - - else: - if len(summary_data) > 0: - parsed_data += summary_data - - else: - parsed_data += self._parse_county_xml_data_to_precincts( - county_data, county_details - ) - - fetched_counties.append(county_name) - - self.previous_county_details_version_num = version_num - self.previous_county_details_list = county_details_list - self.previously_fetched_counties = set(fetched_counties) - - return missing_counties, parsed_data diff --git a/parsons/sftp/__init__.py b/parsons/sftp/__init__.py index 2e59cd0ae5..aad92ae9e6 100644 --- a/parsons/sftp/__init__.py +++ b/parsons/sftp/__init__.py @@ -1,3 +1,5 @@ from parsons.sftp.sftp import SFTP -__all__ = ["SFTP"] +__all__ = [ + 'SFTP' +] diff --git a/parsons/sftp/sftp.py b/parsons/sftp/sftp.py index 4b4301357b..c09670bf22 100644 --- a/parsons/sftp/sftp.py +++ b/parsons/sftp/sftp.py @@ -68,7 +68,7 @@ def create_connection(self): conn.close() transport.close() - def list_directory(self, remote_path=".", connection=None): + def list_directory(self, remote_path='.', connection=None): """ List the contents of a directory @@ -151,14 +151,8 @@ def get_file(self, remote_path, local_path=None, connection=None): return local_path @connect - def get_files( - self, - files_to_download=None, - remote=None, - connection=None, - pattern=None, - local_paths=None, - ): + def get_files(self, files_to_download=None, remote=None, connection=None, pattern=None, + local_paths=None): """ Download a list of files, either by providing the list explicitly, providing directories that contain files to download, or both. 
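A short sketch of the calling patterns this supports; the host, credentials, and remote paths are hypothetical:

    from parsons.sftp.sftp import SFTP

    sftp = SFTP("sftp.example.org", "user", "password")
    # Download everything in a directory that matches a pattern...
    paths = sftp.get_files(remote="/exports", pattern=r"\.csv$")
    # ...or name the files explicitly (temporary local files unless local_paths is given):
    paths = sftp.get_files(files_to_download=["/exports/a.csv", "/exports/b.csv"])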
@@ -182,10 +176,8 @@ def get_files( """ if not (files_to_download or remote): - raise ValueError( - "You must provide either `files_to_download`, `remote`, or both, as " - "an argument to `get_files`." - ) + raise ValueError("You must provide either `files_to_download`, `remote`, or both, as " + "an argument to `get_files`.") if not files_to_download: files_to_download = [] @@ -195,20 +187,15 @@ def get_files( files_to_download.extend(self.list_files(remote, connection, pattern)) except TypeError: # if it's not a str it's a list files_to_download.extend( - f - for file_list in [ - self.list_files(directory, connection, pattern) - for directory in remote - ] + f for file_list in [self.list_files(directory, connection, pattern) + for directory in remote] for f in file_list ) if local_paths and len(local_paths) != len(files_to_download): - logger.warning( - "You provided a list of local paths for your files but it was not " - "the same length as the files you are going to download.\nDefaulting to " - "temporary files." - ) + logger.warning("You provided a list of local paths for your files but it was not " + "the same length as the files you are going to download.\nDefaulting to " + "temporary files.") local_paths = [] if local_paths: @@ -241,7 +228,7 @@ def get_table(self, remote_path, connection=None): """ if not file_utilities.valid_table_suffix(remote_path): - raise ValueError("File type cannot be converted to a Parsons table.") + raise ValueError('File type cannot be converted to a Parsons table.') return Table.from_csv(self.get_file(remote_path, connection=connection)) @@ -294,10 +281,10 @@ def get_file_size(self, remote_path, connection=None): """ if connection: - size = connection.file(remote_path, "r")._get_size() + size = connection.file(remote_path, 'r')._get_size() else: with self.create_connection() as connection: - size = connection.file(remote_path, "r")._get_size() + size = connection.file(remote_path, 'r')._get_size() return size / 1024 @@ -306,19 +293,15 @@ def _list_contents(remote_path, connection, dir_pattern=None, file_pattern=None) dirs_to_return = [] files_to_return = [] - dirs_and_files = [ - (S_ISDIR, dir_pattern, True, dirs_to_return), - (S_ISREG, file_pattern, False, files_to_return), - ] + dirs_and_files = [(S_ISDIR, dir_pattern, True, dirs_to_return), + (S_ISREG, file_pattern, False, files_to_return)] try: for entry in connection.listdir_attr(remote_path): entry_pathname = remote_path + "/" + entry.filename for method, pattern, do_search_full_path, paths in dirs_and_files: string = entry_pathname if do_search_full_path else entry.filename - if method(entry.st_mode) and ( - not pattern or re.search(pattern, string) - ): + if method(entry.st_mode) and (not pattern or re.search(pattern, string)): paths.append(entry_pathname) except FileNotFoundError: # This error is raised when a directory is empty pass @@ -363,15 +346,8 @@ def list_files(self, remote_path, connection=None, pattern=None): return self._list_contents(remote_path, connection, file_pattern=pattern)[1] @connect - def walk_tree( - self, - remote_path, - connection=None, - download=False, - dir_pattern=None, - file_pattern=None, - max_depth=2, - ): + def walk_tree(self, remote_path, connection=None, download=False, dir_pattern=None, + file_pattern=None, max_depth=2): """ Recursively walks a directory, fetching all subdirectories and files (as long as they match `dir_pattern` and `file_pattern`, respectively) and the maximum directory @@ -400,42 +376,24 @@ def walk_tree( """ if max_depth > 3: - 
logger.warning( - "Calling `walk_tree` with `max_depth` {}. " - "Recursively walking a remote directory will be much slower than a " - "similar operation on a local file system.".format(max_depth) - ) - - to_return = self._walk_tree( - remote_path, - connection, - download, - dir_pattern, - file_pattern, - max_depth=max_depth, - ) + logger.warning("Calling `walk_tree` with `max_depth` {}. " + "Recursively walking a remote directory will be much slower than a " + "similar operation on a local file system.".format(max_depth)) + + to_return = self._walk_tree(remote_path, connection, download, dir_pattern, file_pattern, + max_depth=max_depth) return to_return - def _walk_tree( - self, - remote_path, - connection, - download=False, - dir_pattern=None, - file_pattern=None, - depth=0, - max_depth=2, - ): + def _walk_tree(self, remote_path, connection, download=False, dir_pattern=None, + file_pattern=None, depth=0, max_depth=2): dir_list = [] file_list = [] depth += 1 - dirs, files = self._list_contents( - remote_path, connection, dir_pattern, file_pattern - ) + dirs, files = self._list_contents(remote_path, connection, dir_pattern, file_pattern) if download: self.get_files(files_to_download=files) @@ -443,13 +401,7 @@ def _walk_tree( if depth < max_depth: for directory in dirs: deeper_dirs, deeper_files = self._walk_tree( - directory, - connection, - download, - dir_pattern, - file_pattern, - depth, - max_depth, + directory, connection, download, dir_pattern, file_pattern, depth, max_depth ) dir_list.extend(deeper_dirs) file_list.extend(deeper_files) diff --git a/parsons/sftp/utilities.py b/parsons/sftp/utilities.py index 999a4839e6..bbf88181ac 100644 --- a/parsons/sftp/utilities.py +++ b/parsons/sftp/utilities.py @@ -5,7 +5,7 @@ def connection_exists(args, kwargs): if any([isinstance(arg, paramiko.sftp_client.SFTPClient) for arg in args]): return True - if "connection" in kwargs and kwargs["connection"]: + if 'connection' in kwargs and kwargs['connection']: return True return False @@ -15,7 +15,7 @@ def connect(func): def wrapper(*args, **kwargs): if not connection_exists(args, kwargs): with args[0].create_connection() as connection: - kwargs["connection"] = connection + kwargs['connection'] = connection return func(*args, **kwargs) else: return func(*args, **kwargs) diff --git a/parsons/shopify/__init__.py b/parsons/shopify/__init__.py index ff1eeeb94c..51cfe3e531 100644 --- a/parsons/shopify/__init__.py +++ b/parsons/shopify/__init__.py @@ -1,3 +1,5 @@ from parsons.shopify.shopify import Shopify -__all__ = ["Shopify"] +__all__ = [ + 'Shopify' +] diff --git a/parsons/shopify/shopify.py b/parsons/shopify/shopify.py index 467753ad52..8282ada9d8 100644 --- a/parsons/shopify/shopify.py +++ b/parsons/shopify/shopify.py @@ -9,6 +9,7 @@ class Shopify(object): """ Instantiate the Shopify class + `Args:` subdomain: str The Shopify subdomain (e.g. ``myorg`` for myorg.myshopify.com) Not required if @@ -22,49 +23,23 @@ class Shopify(object): api_version: str The Shopify API version. Not required if ``SHOPIFY_API_VERSION`` env variable set. - access_token: str - The Shopify access token. Not required if ``SHOPIFY_ACCESS_TOKEN`` env - variable set. If argument or env variable is set, password and api_key - are ignored. 
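In short, the constructor logic being removed alongside this docstring chose between the two auth schemes as follows (condensed from the deleted lines further down this hunk; names are as they appear there):

    # Token auth wins when present; otherwise fall back to basic auth.
    if access_token is not None:
        client = APIConnector(base_url, headers={"X-Shopify-Access-Token": access_token})
    else:
        client = APIConnector(base_url, auth=(api_key, password))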
`Returns:` Shopify Class """ - - def __init__( - self, - subdomain=None, - password=None, - api_key=None, - api_version=None, - access_token=None, - ): - self.subdomain = check_env.check("SHOPIFY_SUBDOMAIN", subdomain) - self.access_token = check_env.check( - "SHOPIFY_ACCESS_TOKEN", access_token, optional=True - ) - self.password = check_env.check("SHOPIFY_PASSWORD", password, optional=True) - self.api_key = check_env.check("SHOPIFY_API_KEY", api_key, optional=True) - self.api_version = check_env.check("SHOPIFY_API_VERSION", api_version) - self.base_url = "https://%s.myshopify.com/admin/api/%s/" % ( - self.subdomain, - self.api_version, + def __init__(self, subdomain=None, password=None, api_key=None, api_version=None): + self.subdomain = check_env.check('SHOPIFY_SUBDOMAIN', subdomain) + self.password = check_env.check('SHOPIFY_PASSWORD', password) + self.api_key = check_env.check('SHOPIFY_API_KEY', api_key) + self.api_version = check_env.check('SHOPIFY_API_VERSION', api_version) + self.base_url = 'https://%s.myshopify.com/admin/api/%s/' % ( + self.subdomain, self.api_version ) - if self.access_token is None and ( - self.password is None or self.api_key is None - ): - raise KeyError("Must set either access_token or both api_key and password.") - if self.access_token is not None: - self.client = APIConnector( - self.base_url, headers={"X-Shopify-Access-Token": access_token} - ) - else: - self.client = APIConnector( - self.base_url, auth=(self.api_key, self.password) - ) + self.client = APIConnector(self.base_url, auth=(self.api_key, self.password)) def get_count(self, query_date=None, since_id=None, table_name=None): """ Get the count of rows in a table. + `Args:` query_date: str Filter query by a date that rows were created. This filter is ignored if value @@ -76,17 +51,13 @@ def get_count(self, query_date=None, since_id=None, table_name=None): `Returns:` int """ - return ( - self.client.request( - self.get_query_url(query_date, since_id, table_name), "GET" - ) - .json() - .get("count", 0) - ) + return self.client.request(self.get_query_url(query_date, since_id, table_name), + 'GET').json().get("count", 0) def get_orders(self, query_date=None, since_id=None, completed=True): """ Get Shopify orders. + `Args:` query_date: str Filter query by a date that rows were created. Format: yyyy-mm-dd. 
This filter @@ -104,9 +75,9 @@ def _append_orders(url): nonlocal orders if completed: - url += "&financial_status=paid" + url += '&financial_status=paid' - res = self.client.request(url, "GET") + res = self.client.request(url, 'GET') cur_orders = res.json().get("orders", []) @@ -118,11 +89,11 @@ def _append_orders(url): for key1 in order: if isinstance(order[key1], dict): for key2 in order[key1]: - keys_to_add[key1 + "_" + key2] = order[key1][key2] + keys_to_add[key1 + '_' + key2] = order[key1][key2] keys_to_delete.append(key1) - elif key1 == "note_attributes": + elif key1 == 'note_attributes': for note in order[key1]: - keys_to_add[key1 + "_" + note["name"]] = note["value"] + keys_to_add[key1 + '_' + note['name']] = note['value'] order.update(keys_to_add) for key in keys_to_delete: @@ -136,7 +107,7 @@ def _append_orders(url): # Get next page while res.headers.get("Link"): - link = re.split("; |, ", res.headers.get("Link")) + link = re.split('; |, ', res.headers.get("Link")) if len(link) and link[len(link) - 1] == 'rel="next"': res = _append_orders(link[len(link) - 2][1:-1]) else: @@ -144,11 +115,10 @@ def _append_orders(url): return Table(orders) - def get_query_url( - self, query_date=None, since_id=None, table_name=None, count=True - ): + def get_query_url(self, query_date=None, since_id=None, table_name=None, count=True): """ Get the URL of a Shopify API request + `Args:` query_date: str Filter query by a date that rows were created. Format: yyyy-mm-dd. This filter @@ -162,62 +132,51 @@ def get_query_url( `Returns:` str """ - filters = "limit=250&status=any" + filters = 'limit=250&status=any' if count: - table = table_name + "/count.json" + table = table_name + '/count.json' else: - table = table_name + ".json" + table = table_name + '.json' if query_date: # Specific date if provided query_date = datetime.strptime(query_date, "%Y-%m-%d") max_date = query_date + timedelta(days=1) - filters += "&created_at_min={}&created_at_max={}".format( - query_date.isoformat(), max_date.isoformat() - ) + filters += '&created_at_min={}&created_at_max={}'.format(query_date.isoformat(), + max_date.isoformat()) elif since_id: # Since ID if provided - filters += "&since_id=%s" % since_id + filters += '&since_id=%s' % since_id - return self.base_url + "%s?%s" % (table, filters) + return self.base_url + '%s?%s' % (table, filters) def graphql(self, query): """ Make GraphQL request. Reference: https://shopify.dev/api/admin-graphql + `Args:` query: str GraphQL query. 
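A minimal call sketch; the subdomain, credentials, and API version mirror the illustrative values in load_to_table below, and the query itself is invented:

    from parsons.shopify.shopify import Shopify

    shopify = Shopify(subdomain="myorg", password="abc123",
                      api_key="abc123", api_version="2020-10")
    data = shopify.graphql("{ shop { name } }")  # returns the "data" member of the response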
`Returns:` dict """ - return ( - self.client.request( - self.base_url + "graphql.json", "POST", json={"query": query} - ) - .json() - .get("data") - ) + return self.client.request( + self.base_url + 'graphql.json', 'POST', json={"query": query} + ).json().get('data') @classmethod - def load_to_table( - cls, - subdomain=None, - password=None, - api_key=None, - api_version=None, - query_date=None, - since_id=None, - completed=True, - ): + def load_to_table(cls, subdomain=None, password=None, api_key=None, api_version=None, + query_date=None, since_id=None, completed=True): """ Fast classmethod so you can get the data all at once: - tabledata = Shopify.load_to_table(subdomain='myorg', password='abc123', - api_key='abc123', api_version='2020-10', - query_date='2020-10-20', since_id='8414', - completed=True) + tabledata = Shopify.load_to_table(subdomain='myorg', password='abc123', + api_key='abc123', api_version='2020-10', + query_date='2020-10-20', since_id='8414', + completed=True) This instantiates the class and makes the appropriate query type to Shopify's orders table based on which arguments are supplied. + `Args:` subdomain: str The Shopify subdomain (e.g. ``myorg`` for myorg.myshopify.com). @@ -238,6 +197,5 @@ def load_to_table( `Returns:` Table Class """ - return cls(subdomain, password, api_key, api_version).get_orders( - query_date, since_id, completed - ) + return cls(subdomain, password, api_key, api_version).get_orders(query_date, since_id, + completed) diff --git a/parsons/sisense/__init__.py b/parsons/sisense/__init__.py index 107039e9b3..646fea11d6 100644 --- a/parsons/sisense/__init__.py +++ b/parsons/sisense/__init__.py @@ -1,3 +1,5 @@ from parsons.sisense.sisense import Sisense -__all__ = ["Sisense"] +__all__ = [ + 'Sisense' +] diff --git a/parsons/sisense/sisense.py b/parsons/sisense/sisense.py index 3da1eb97e5..ebd0644f5e 100644 --- a/parsons/sisense/sisense.py +++ b/parsons/sisense/sisense.py @@ -6,7 +6,7 @@ logger = logging.getLogger(__name__) -URI = "https://app.periscopedata.com/api/v1/" +URI = 'https://app.periscopedata.com/api/v1/' class Sisense(object): @@ -25,13 +25,13 @@ class Sisense(object): """ def __init__(self, site_name=None, api_key=None): - self.site_name = check_env.check("SISENSE_SITE_NAME", site_name) - self.api_key = check_env.check("SISENSE_API_KEY", api_key) + self.site_name = check_env.check('SISENSE_SITE_NAME', site_name) + self.api_key = check_env.check('SISENSE_API_KEY', api_key) self.uri = URI self.api = self._api() def _api(self): - headers = {"HTTP-X-PARTNER-AUTH": self.site_name + ":" + self.api_key} + headers = {'HTTP-X-PARTNER-AUTH': self.site_name + ":" + self.api_key} return APIConnector(uri=self.uri, headers=headers) def publish_shared_dashboard(self, dashboard_id, chart_id=None, **kwargs): @@ -49,10 +49,8 @@ def publish_shared_dashboard(self, dashboard_id, chart_id=None, **kwargs): `Returns:` Response (dict containing the URL) or an error """ - payload = {"dashboard": dashboard_id, "chart": chart_id, **kwargs} - return self.api.post_request( - "shared_dashboard/create", data=json.dumps(payload) - ) + payload = {'dashboard': dashboard_id, 'chart': chart_id, **kwargs} + return self.api.post_request('shared_dashboard/create', data=json.dumps(payload)) def list_shared_dashboards(self, dashboard_id): """ @@ -64,8 +62,8 @@ def list_shared_dashboards(self, dashboard_id): `Returns:` Response or an error """ - payload = {"dashboard": dashboard_id} - return self.api.post_request("shared_dashboard/list", data=json.dumps(payload)) + payload = 
{'dashboard': dashboard_id} + return self.api.post_request('shared_dashboard/list', data=json.dumps(payload)) def delete_shared_dashboard(self, token): """ @@ -82,7 +80,5 @@ def delete_shared_dashboard(self, token): `Returns:` Response or an error """ - payload = {"token": token} - return self.api.post_request( - "shared_dashboard/delete", data=json.dumps(payload) - ) + payload = {'token': token} + return self.api.post_request('shared_dashboard/delete', data=json.dumps(payload)) diff --git a/parsons/targetsmart/__init__.py b/parsons/targetsmart/__init__.py index f5564c3d37..7c62a33a94 100644 --- a/parsons/targetsmart/__init__.py +++ b/parsons/targetsmart/__init__.py @@ -1,4 +1,7 @@ from parsons.targetsmart.targetsmart_api import TargetSmartAPI from parsons.targetsmart.targetsmart_automation import TargetSmartAutomation -__all__ = ["TargetSmartAPI", "TargetSmartAutomation"] +__all__ = [ + 'TargetSmartAPI', + 'TargetSmartAutomation' +] diff --git a/parsons/targetsmart/targetsmart_api.py b/parsons/targetsmart/targetsmart_api.py index bbe276df7e..1a54f4a9c6 100644 --- a/parsons/targetsmart/targetsmart_api.py +++ b/parsons/targetsmart/targetsmart_api.py @@ -1,27 +1,17 @@ -""" -Routines for interacting with TargetSmart's developer API. - -https://docs.targetsmart.com/developers/tsapis/v2/index.html -""" -import logging - -import petl import requests +import petl from parsons.etl.table import Table from parsons.utilities import check_env -from .targetsmart_smartmatch import SmartMatch +URI = 'https://api.targetsmart.com/' -URI = "https://api.targetsmart.com/" -logger = logging.getLogger(__name__) +class TargetSmartConnector(object): - -class TargetSmartConnector: def __init__(self, api_key): self.uri = URI - self.api_key = check_env.check("TS_API_KEY", api_key) - self.headers = {"x-api-key": self.api_key} + self.api_key = check_env.check('TS_API_KEY', api_key) + self.headers = {'x-api-key': self.api_key} def request(self, url, args=None, raw=False): @@ -32,15 +22,16 @@ def request(self, url, args=None, raw=False): return r.json() - return Table(r.json()["output"]) + return Table(r.json()['output']) + +class Person(object): -class Person: def __init__(self): return None - def data_enhance(self, search_id, search_id_type="voterbase", state=None): + def data_enhance(self, search_id, search_id_type='voterbase', state=None): """ Searches for a record based on an id or phone or email address @@ -48,7 +39,7 @@ def data_enhance(self, search_id, search_id_type="voterbase", state=None): search_id: str The primary key or email address or phone number search_id_type: str - One of ``voterbase``, ``exacttrack``, ``phone``, + One of ``voterbase``, ``exacttrack``, ``abilitec_consumer_link``, ``phone``, ``email``, ``smartvan``, ``votebuilder``, ``voter``, ``household``. state: str Two character state code. Required if ``search_id_type`` of ``smartvan``, @@ -58,65 +49,37 @@ def data_enhance(self, search_id, search_id_type="voterbase", state=None): See :ref:`parsons-table` for output options. 
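For example, a minimal lookup by email; the address is invented and TS_API_KEY is read from the environment:

    from parsons.targetsmart.targetsmart_api import TargetSmartAPI

    ts = TargetSmartAPI()
    enhanced = ts.data_enhance("jdoe@example.com", search_id_type="email")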
""" - if search_id_type in ["smartvan", "votebuilder", "voter"] and state is None: + if search_id_type in ['smartvan', 'votebuilder', 'voter'] and state is None: - raise KeyError( - "Search ID type '{}' requires state kwarg".format(search_id_type) - ) + raise KeyError("Search ID type '{}' requires state kwarg".format(search_id_type)) - if search_id_type not in ( - "voterbase", - "exacttrack", - "phone", - "email", - "smartvan", - "votebuilder", - "voter", - "household", - ): + if search_id_type not in ('voterbase', 'exacttrack', 'abilitec_consumer_link', 'phone', + 'email', 'smartvan', 'votebuilder', 'voter', 'household'): - raise ValueError("Search_id_type is not valid") + raise ValueError('Search_id_type is not valid') - url = self.connection.uri + "person/data-enhance" + url = self.connection.uri + 'person/data-enhance' - args = { - "search_id": search_id, - "search_id_type": search_id_type, - "state": state, - } + args = {'search_id': search_id, + 'search_id_type': search_id_type, + 'state': state + } return self.connection.request(url, args=args) - def radius_search( - self, - first_name, - last_name, - middle_name=None, - name_suffix=None, - latitude=None, - longitude=None, - address=None, - radius_size=10, - radius_unit="miles", - max_results=10, - gender="a", - age_min=None, - age_max=None, - composite_score_min=1, - composite_score_max=100, - last_name_exact=True, - last_name_is_prefix=False, - last_name_prefix_length=10, - address_type="reg", - ): + def radius_search(self, first_name, last_name, middle_name=None, name_suffix=None, + latitude=None, longitude=None, address=None, radius_size=10, + radius_unit='miles', max_results=10, gender='a', age_min=None, age_max=None, + composite_score_min=1, composite_score_max=100, last_name_exact=True, + last_name_is_prefix=False, last_name_prefix_length=10): """ Search for a person based on a specified radius `Args`: first_name: str - One or more alpha characters. Required + One or more alpha characters last_name: str - One or more alpha characters. Required + One or more alpha characters middle_name: str One or more alpha characters name_suffix: str @@ -129,8 +92,6 @@ def radius_search( Any geocode-able address address_type: str ``reg`` for registration (default) or ``tsmart`` for TargetSmart - radius_size: int - A positive integer where combined with ``radius_unit`` does not exceed 120 miles radius_unit: str One of ``meters``, ``feet``, ``miles`` (default), or ``kilometers``. 
max_results: int @@ -168,46 +129,36 @@ def radius_search( """ if (latitude is None or longitude is None) and address is None: - raise ValueError("Lat/Long or Address required") - - if not first_name: - raise ValueError("First name is required") - - if not last_name: - raise ValueError("Last name is required") + raise ValueError('Lat/Long or Address required') # Convert booleans for a in [last_name_exact, last_name_is_prefix]: a = str(a) - url = self.connection.uri + "person/radius-search" - - args = { - "first_name": first_name, - "last_name": last_name, - "middle_name": middle_name, - "name_suffix": name_suffix, - "latitude": latitude, - "longitude": longitude, - "address": address, - "address_type": address_type, - "radius_size": radius_size, - "radius_unit": radius_unit, - "max_results": max_results, - "gender": gender, - "age_min": age_min, - "age_max": age_max, - "composite_score_min": composite_score_min, - "composite_score_max": composite_score_max, - "last_name_exact": last_name_exact, - "last_name_is_prefix": last_name_is_prefix, - "last_name_prefix_length": last_name_prefix_length, - } + url = self.connection.uri + 'person/radius-search' + + args = {'first_name': first_name, + 'last_name': last_name, + 'middle_name': middle_name, + 'name_suffix': name_suffix, + 'latitude': latitude, + 'longitude': longitude, + 'address': address, + 'radius_size': radius_size, + 'radius_unit': radius_unit, + 'max_results': max_results, + 'gender': gender, + 'age_min': age_min, + 'age_max': age_max, + 'composite_score_min': composite_score_min, + 'composite_score_max': composite_score_max, + 'last_name_exact': last_name_exact, + 'last_name_is_prefix': last_name_is_prefix, + 'last_name_prefix_length': last_name_prefix_length + } r = self.connection.request(url, args=args, raw=True) - return Table([itm for itm in r["output"]]).unpack_dict( - "data_fields", prepend=False - ) + return Table([itm for itm in r['output']]).unpack_dict('data_fields', prepend=False) def phone(self, table): """ @@ -222,28 +173,21 @@ def phone(self, table): See :ref:`parsons-table` for output options. """ - url = self.connection.uri + "person/phone-search" + url = self.connection.uri + 'person/phone-search' + + args = {'phones': list(petl.values(table.table, 0))} - args = {"phones": list(petl.values(table.table, 0))} + return Table(self.connection.request(url, args=args, raw=True)['result']) - return Table(self.connection.request(url, args=args, raw=True)["result"]) +class Service(object): -class Service: def __init__(self): return None - def district( - self, - search_type="zip", - address=None, - zip5=None, - zip4=None, - state=None, - latitude=None, - longitude=None, - ): + def district(self, search_type='zip', address=None, zip5=None, zip4=None, state=None, + latitude=None, longitude=None): """ Return district information based on a geographic point. The method allows you to search based on the following: @@ -253,7 +197,7 @@ def district( :header-rows: 1 * - Search Type - - ``search_type`` + - Search Type Name - Required kwarg(s) * - Zip Code - ``zip`` @@ -262,7 +206,7 @@ def district( - ``address`` - ``address`` * - Point - - ``point`` + - point - ``latitude``, ``longitude`` `Args`: @@ -279,65 +223,53 @@ def district( The two character state code latitude: float or str Valid latitude floating point - longitude: float or str + lontitude: float or str Valid longitude floating point `Returns`: Parsons Table See :ref:`parsons-table` for output options. 
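For instance, a point lookup might look like this; the coordinates are hypothetical:

    ts = TargetSmartAPI()  # TS_API_KEY from the environment
    districts = ts.district(search_type="point", latitude=33.7490, longitude=-84.3880)
    # Note that per the validation below, search_type="zip" needs both zip5 and zip4.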
""" - if search_type == "zip" and None in [zip5, zip4]: + if search_type == 'zip' and None in [zip5, zip4]: raise ValueError("Search type 'zip' requires 'zip5' and 'zip4' arguments") - elif search_type == "point" and None in [latitude, longitude]: - raise ValueError( - "Search type 'point' requires 'latitude' and 'longitude' arguments" - ) + elif search_type == 'point' and None in [latitude, longitude]: + raise ValueError("Search type 'point' requires 'latitude' and 'longitude' arguments") - elif search_type == "address" and None in [address]: + elif search_type == 'address' and None in [address]: raise ValueError("Search type 'address' requires 'address' argument") - elif search_type not in ["zip", "point", "address"]: + elif search_type not in ['zip', 'point', 'address']: raise KeyError("Invalid 'search_type' provided. ") else: pass - url = self.connection.uri + "service/district" + url = self.connection.uri + 'service/district' - args = { - "search_type": search_type, - "address": address, - "zip5": zip5, - "zip4": zip4, - "state": state, - "latitude": latitude, - "longitude": longitude, - } + args = {'search_type': search_type, + 'address': address, + 'zip5': zip5, + 'zip4': zip4, + 'state': state, + 'latitude': latitude, + 'longitude': longitude + } - return Table([self.connection.request(url, args=args, raw=True)["match_data"]]) + return Table([self.connection.request(url, args=args, raw=True)['match_data']]) class Voter(object): + def __init__(self, connection): self.connection = connection - def voter_registration_check( - self, - first_name=None, - last_name=None, - state=None, - street_number=None, - street_name=None, - city=None, - zip_code=None, - age=None, - dob=None, - phone=None, - email=None, - unparsed_full_address=None, - ): + def voter_registration_check(self, first_name=None, last_name=None, + state=None, street_number=None, + street_name=None, city=None, zip_code=None, + age=None, dob=None, phone=None, email=None, + unparsed_full_address=None): """ Searches for a registered individual, returns matches. @@ -361,45 +293,44 @@ def voter_registration_check( age; int Optional; One or more integers. Trailing wildcard allowed dob; str - Optional; Numeric characters in YYYYMMDD format. Trailing wildcard allowed + Numeric characters in YYYYMMDD format. Trailing wildcard allowed phone; str - Optional; Integer followed by 0 or more * or integers + Integer followed by 0 or more * or integers email: str - Optional; Alphanumeric character followed by 0 or more * or legal characters + Alphanumeric character followed by 0 or more * or legal characters (alphanumeric, @, -, .) unparsed_full_address: str - Optional; One or more alphanumeric characters. No wildcards. + One or more alphanumeric characters. No wildcards. `Returns` Parsons Table See :ref:`parsons-table` for output options. 
""" - url = self.connection.uri + "voter/voter-registration-check" + url = self.connection.uri + 'voter/voter-registration-check' if None in [first_name, last_name, state]: - raise ValueError( - """Function must include at least first_name, - last_name, and state.""" - ) - - args = { - "first_name": first_name, - "last_name": last_name, - "state": state, - "street_number": street_number, - "street_name": street_name, - "city": city, - "zip_code": zip_code, - "age": age, - "dob": dob, - "phone": phone, - "email": email, - "unparsed_full_address": unparsed_full_address, - } + raise ValueError("""Function must include at least first_name, + last_name, and state.""") + + args = {'first_name': first_name, + 'last_name': last_name, + 'state': state, + 'street_number': street_number, + 'street_name': street_name, + 'city': city, + 'zip_code': zip_code, + 'age': age, + 'dob': dob, + 'phone': phone, + 'email': email, + 'unparsed_full_address': unparsed_full_address + } return self.connection.request(url, args=args, raw=True) -class TargetSmartAPI(Voter, Person, Service, SmartMatch): +class TargetSmartAPI(Voter, Person, Service): + def __init__(self, api_key=None): + self.connection = TargetSmartConnector(api_key=api_key) diff --git a/parsons/targetsmart/targetsmart_automation.py b/parsons/targetsmart/targetsmart_automation.py index eff5ba0847..0bbd69ae53 100644 --- a/parsons/targetsmart/targetsmart_automation.py +++ b/parsons/targetsmart/targetsmart_automation.py @@ -1,26 +1,3 @@ -"""**TargetSmart Automation** - -Parsons provides methods for interacting with TargetSmart Automation Workflows, -a solution for executing custom file processing workflows programmatically. In -some cases, TargetSmart will provide custom list matching solutions using -Automation Workflows. Most TargetSmart clients do not have these workflows and -will only use the Developer API. - -**TargetSmart Developer API versus Automation** - -TargetSmart's Developer API provides an HTTP-based interface for consuming the -general web services that TargetSmart provides. The TargetSmart Automation -system solely provides a solution for consuming customized file processing -workflows that are provisioned for specific client needs. TargetSmart -Automation is based on SFTP instead of HTTP. - -https://docs.targetsmart.com/my_tsmart/automation/developer.html - -For most list matching applications, TargetSmart SmartMatch is now the recommended -solution. See `TargetSmartAPI.smartmatch`. - -""" - from parsons.sftp.sftp import SFTP from parsons.etl.table import Table from parsons.utilities.files import create_temp_file @@ -32,105 +9,75 @@ import xmltodict -TS_STFP_HOST = "transfer.targetsmart.com" +TS_STFP_HOST = 'transfer.targetsmart.com' TS_SFTP_PORT = 22 -TS_SFTP_DIR = "automation" +TS_SFTP_DIR = 'automation' logger = logging.getLogger(__name__) - # Automation matching documentation can be found here: -# https://docs.targetsmart.com/my_tsmart/automation/developer.html. +# https://docs.targetsmart.com/developers/automation/index.html + +# The columns are heavily customized by TS, so while I would like +# to do more column validation and mapping, I'm not sure that is +# going to be possible. 
+
+
 class TargetSmartAutomation(object):
-    """
-    * `Automation overview `_
-    * `Automation integration doc `_
-    """  # noqa
     def __init__(self, sftp_username=None, sftp_password=None):
         self.sftp_host = TS_STFP_HOST
         self.sftp_port = TS_SFTP_PORT
         self.sftp_dir = TS_SFTP_DIR
-        self.sftp_username = check_env.check("TS_SFTP_USERNAME", sftp_username)
-        self.sftp_password = check_env.check("TS_SFTP_PASSWORD", sftp_password)
-        self.sftp = SFTP(
-            self.sftp_host,
-            self.sftp_username,
-            self.sftp_password,
-            self.sftp_port,
-        )
-
-    def match(
-        self,
-        table,
-        job_type,
-        job_name=None,
-        emails=None,
-        call_back=None,
-        remove_files=True,
-    ):
-        """Submit a file for custom data processing using the TargetSmart Automation workflow solution.
+        self.sftp_username = check_env.check('TS_SFTP_USERNAME', sftp_username)
+        self.sftp_password = check_env.check('TS_SFTP_PASSWORD', sftp_password)
+        self.sftp = SFTP(self.sftp_host, self.sftp_username, self.sftp_password, self.sftp_port)
+
+    def match(self, table, job_type, job_name=None, emails=None, call_back=None, remove_files=True):
+        """
+        Match a table to TargetSmart using their bulk matching service.

         .. warning::
             Table Columns
-
-            Each Automation workflow expects an input file that meets the
-            layout requirements provided by TargetSmart. The number of columns
-            and column order is significant. So, if it expected 10 columns and
-            you only provide 9, it will fail. However, if you provide 10
-            columns that are out of order, the job may succeed, but with
-            non-optimal results. You can obtain the layout requirements and
-            other information about a workflow by visiting the Automation
-            console in My TargetSmart. Contact `TargetSmart Client Services
-            `_ for support.
+            The automation job validates the file by column indexes
+            rather than column names. So, if it expected 10 columns and you
+            only provide 9, it will fail. However, if you provide 10 columns that
+            are out of order, the job will succeed, but the records will not
+            match.

         Args:
             table: Parsons Table Object
-                A table object with the required columns. Each workflow type
-                requires the input file to meet the requirements provided by
-                TargetSmart. You can locate the input and output layouts for
-                your available workflows using the My TargetSmart Automation
-                console.
+                A table object with the required columns. (Required columns provided by TargetSmart)
             job_type: str
-                The workflow name to execute. **This is case sensitive.**. You
-                can locate the workflow names and other information by visiting
-                the Automation console in My TargetSmart.
+                The match job type. **This is case sensitive.** (Match job names provided by TargetSmart)
             job_name: str
-                Optional job execution name.
+                Optional job name.
             emails: list
                 A list of emails that will receive status notifications. This
                 is useful in debugging failed jobs.
             call_back: str
                 A callback url to which the status will be posted. See
-                `TargetSmart documentation `_
+                `TargetSmart documentation `_
                 for more details.
             remove_files: boolean
                 Remove the configuration, file to be matched and matched file from
-                the TargetSmart SFTP upon completion or failure of match.
-
-        """  # noqa: E501,E261
+                the TargetSmart FTP upon completion or failure of match.
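A hedged end-to-end sketch of this method (assumes TS_SFTP_USERNAME and TS_SFTP_PASSWORD are set; 'some_job_type' and the input columns are placeholders for whatever layout TargetSmart has provisioned for the account):

from parsons import Table
from parsons.targetsmart.targetsmart_automation import TargetSmartAutomation

input_tbl = Table([{'first_name': 'Jane', 'last_name': 'Doe', 'state': 'IL'}])
automation = TargetSmartAutomation()
# Uploads the CSV and job XML, polls over SFTP until the job finishes,
# and returns the matched output file as a Parsons Table.
matched_tbl = automation.match(input_tbl, job_type='some_job_type',
                               emails=['alerts@example.org'])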
+ """ # noqa: E501,E261 # Generate a match job job_name = job_name or str(uuid.uuid1()) try: # Upload table - self.sftp.put_file(table.to_csv(), f"{self.sftp_dir}/{job_name}_input.csv") - logger.info(f"Table with {table.num_rows} rows uploaded to TargetSmart.") + self.sftp.put_file(table.to_csv(), f'{self.sftp_dir}/{job_name}_input.csv') + logger.info(f'Table with {table.num_rows} rows uploaded to TargetSmart.') # Create/upload XML configuration - xml = self.create_job_xml( - job_type, - job_name, - emails=emails, - status_key=job_name, - call_back=call_back, - ) - self.sftp.put_file(xml, f"{self.sftp_dir}/{job_name}.job.xml") - logger.info( - f"Payload uploaded to TargetSmart. Job type: {job_type}. Job name: {job_name}" - ) + xml = self.create_job_xml(job_type, job_name, emails=emails, + status_key=job_name, call_back=call_back) + self.sftp.put_file(xml, f'{self.sftp_dir}/{job_name}.job.xml') + logger.info('Match configuration uploaded to TargetSmart.') # Check xml configuration status self.poll_config_status(job_name) @@ -139,39 +86,30 @@ def match( self.match_status(job_name) # Download the resulting file - tbl = Table.from_csv( - self.sftp.get_file(f"{self.sftp_dir}/{job_name}_output.csv") - ) + tbl = Table.from_csv(self.sftp.get_file(f'{self.sftp_dir}/{job_name}_output.csv')) finally: # Clean up files if remove_files: self.remove_files(job_name) + # Log Stats + # TO DO: Provide some stats on the match + # Return file as a Table return tbl - def execute(self, *args, **kwargs): - """Most Automation workflows perform list matching. However, it is possible that - a custom workflow might be provisioned for a client for other types of - file processing. The ``execute`` method is provided as an alias for the - ``match`` method which may be a confusing name in these cases. 
- """ - self.match(*args, **kwargs) - - def create_job_xml( - self, job_type, job_name, emails=None, status_key=None, call_back=None - ): + def create_job_xml(self, job_type, job_name, emails=None, status_key=None, call_back=None): # Internal method to create a valid job xml job = ET.Element("job") # Generate Base XML - input_file = ET.SubElement(job, "inputfile") - input_file.text = job_name + "_input.csv" - output_file = ET.SubElement(job, "outputfile") - output_file.text = job_name + "_output.csv" - jobtype = ET.SubElement(job, "jobtype", text=job_type) + input_file = ET.SubElement(job, 'inputfile') + input_file.text = job_name + '_input.csv' + output_file = ET.SubElement(job, 'outputfile') + output_file.text = job_name + '_output.csv' + jobtype = ET.SubElement(job, 'jobtype', text=job_type) jobtype.text = job_type # Add status key @@ -186,10 +124,10 @@ def create_job_xml( if emails: emails_el = ET.SubElement(args, "arg", name="__emails") - emails_el.text = ",".join(emails) + emails_el.text = ','.join(emails) # Write xml to file object - local_path = create_temp_file(suffix=".xml") + local_path = create_temp_file(suffix='.xml') tree = ET.ElementTree(job) tree.write(local_path) return local_path @@ -202,7 +140,7 @@ def poll_config_status(self, job_name, polling_interval=20): time.sleep(polling_interval) if self.config_status(job_name): return True - logger.info(f"Waiting on {job_name} job configuration...") + logger.info(f'Waiting on {job_name} job configuration...') def config_status(self, job_name): # Check the status of the configuration by parsing the @@ -210,17 +148,15 @@ def config_status(self, job_name): for f in self.sftp.list_directory(remote_path=self.sftp_dir): - if f == f"{job_name}.job.xml.good": - logger.info(f"Match job {job_name} configured.") + if f == f'{job_name}.job.xml.good': + logger.info(f'Match job {job_name} configured.') return True - elif f == f"{job_name}.job.xml.bad": - logger.info(f"Match job {job_name} configuration error.") + elif f == f'{job_name}.job.xml.bad': + logger.info(f'Match job {job_name} configuration error.') # To Do: Lift up the configuration error. - raise ValueError( - "Job configuration failed. If you provided an email" - "address, you will be sent more details." - ) + raise ValueError('Job configuration failed. If you provided an email' + 'address, you will be sent more details.') else: pass @@ -235,26 +171,22 @@ def match_status(self, job_name, polling_interval=60): while True: - logger.debug("Match running...") + logger.debug('Match running...') for file_name in self.sftp.list_directory(remote_path=self.sftp_dir): - if file_name == f"{job_name}.finish.xml": + if file_name == f'{job_name}.finish.xml': - xml_file = self.sftp.get_file( - f"{self.sftp_dir}/{job_name}.finish.xml" - ) - with open(xml_file, "rb") as x: + xml_file = self.sftp.get_file(f'{self.sftp_dir}/{job_name}.finish.xml') + with open(xml_file, 'rb') as x: xml = xmltodict.parse(x, dict_constructor=dict) - if xml["jobcontext"]["state"] == "error": + if xml['jobcontext']['state'] == 'error': # To Do: Parse these in a pretty way logger.info(f"Match Error: {xml['jobcontext']['errors']}") - raise ValueError( - f"Match job failed. {xml['jobcontext']['errors']}" - ) + raise ValueError(f"Match job failed. 
{xml['jobcontext']['errors']}") - elif xml["jobcontext"]["state"] == "success": - logger.info("Match complete.") + elif xml['jobcontext']['state'] == 'success': + logger.info('Match complete.') return True @@ -265,5 +197,5 @@ def remove_files(self, job_name): for file_name in self.sftp.list_directory(remote_path=self.sftp_dir): if job_name in file_name: - self.sftp.remove_file(f"{self.sftp_dir}/{file_name}") - logger.info(f"{file_name} removed from SFTP.") + self.sftp.remove_file(f'{self.sftp_dir}/{file_name}') + logger.info(f'{file_name} removed from SFTP.') diff --git a/parsons/targetsmart/targetsmart_smartmatch.py b/parsons/targetsmart/targetsmart_smartmatch.py deleted file mode 100644 index 55798410c2..0000000000 --- a/parsons/targetsmart/targetsmart_smartmatch.py +++ /dev/null @@ -1,318 +0,0 @@ -"""Implements client routine to allow execution of TargetSmart SmartMatch -workflows. - -TargetSmart SmartMatch API doc: -https://docs.targetsmart.com/developers/tsapis/v2/service/smartmatch.html -""" - -import gzip -import logging -import shutil -import tempfile -import time -import uuid - -import petl -import requests - -from parsons import Table - -logger = logging.getLogger(__name__) - -VALID_FIELDS = [ - "voterbase_id", - "smartvan_id", - "voter_id", - "exact_track", - "full_name", - "first_name_combined", - "first_name", - "middle_name", - "last_name", - "name_suffix", - "address1", - "address2", - "city", - "state", - "zip", - "age", - "gender", - "dob", - "phone", - "email", - "latitude", - "longitude", -] - -INTERNAL_JOIN_ID = "matchback_id" -INTERNAL_JOIN_ID_CONFLICT = "__matchback_id" - - -class SmartMatchError(Exception): - """Raised when SmartMatch workflow processing fails.""" - - -def _smartmatch_upload(url, fname): - logger.info(f"Uploading {fname} to {url} to begin SmartMatch workflow execution.") - with open(fname, "rb") as reader: - response_2 = requests.put(url, data=reader, headers={"Content-Type": ""}) - - response_2.raise_for_status() - - -def _smartmatch_download(url, writer): - with requests.get(url, stream=True) as response: - response.raise_for_status() - for chunk in response.iter_content(chunk_size=8192): - writer.write(chunk) - - -def _add_join_id(input_table): - """`matchback_id` is added to the raw input table so the results can later be - joined back. Integer sequence values are used. If the column already exists - in the raw input, it is renamed to `__matchback_id` and restored after - result join. - """ - if INTERNAL_JOIN_ID in input_table.fieldnames(): - input_table = input_table.rename(INTERNAL_JOIN_ID, INTERNAL_JOIN_ID_CONFLICT) - - return input_table.addrownumbers(field=INTERNAL_JOIN_ID) - - -def _prepare_input(intable, tmpdir): - valid = VALID_FIELDS + [INTERNAL_JOIN_ID] - supported = set(intable.fieldnames()) & set(valid) - if not supported: - raise SmartMatchError( - "No supported field identifiers were found in the input table." - f" Expecting one or more from: {VALID_FIELDS}" - ) - return intable.cut(*supported) - - -class SmartMatch: - """ - Works as a mixin to the TargetSmartAPI class. 
- """ - - def __init__(self): - # Set by TargetSmartAPI constructor - self.connection = None - - def _smartmatch_poll(self, poll_url, submit_filename): - download_url = None - while True: - poll_response = requests.get( - poll_url, - {"filename": submit_filename}, - headers=self.connection.headers, - ) - - if poll_response.ok: - poll_info = poll_response.json() - - if poll_info["error"]: - raise SmartMatchError(poll_info["error"]) - - download_url = poll_info["url"] - if download_url: - break - time.sleep(60 * 2.5) - return download_url - - def smartmatch( - self, - input_table, - max_matches=1, - include_email=False, - include_landline=False, - include_wireless=False, - include_voip=False, - tmp_location=None, - keep_smartmatch_input_file=False, - keep_smartmatch_output_gz_file=False, - ): - """Submit the contact list records available in the Parsons table ``input_table`` to - TargetSmart SmartMatch. - - * `SmartMatch overview `_ - * `SmartMatch API doc `_ - * `Supported input header field identifiers `_ - - Your application provides a contact list which will be matched to - TargetSmart’s database of voting age individuals. - - `TargetSmart Client Services `_ - provisions SmartMatch for your API key, configuring the fields from the - TargetSmart Data Dictionary that will be appended to each matched - record. - - This method blocks until TargetSmart has completed the remote workflow - execution. The execution time can take minutes to hours to complete - depending on the file size, the types of field identifiers present, and - TargetSmart system load. SmartMatch executions cannot be canceled once - submitted to TargetSmart. - - Since Parsons Petl tables are lazy, the SmartMatch output file is always - retained in ``tmp_location``. If your Parsons-based ETL workflow fails - downstream it may be beneficial to recover the raw SmartMatch output - from this location. You may delete this data when it is no longer - needed. - - `Args:` - input_table: Parsons or Petl table - A Parsons table with `header field names supported by SmartMatch `_. Required. - max_matches: int - By default only a single best match is returned for an input record. Increase to return additional potentially accurate matches for each input record. Value between 1-10. Default of 1. - include_email: bool - Set to True to include appended email values for matched records. This is only applicable if your TargetSmart account is configued to return email data. Additional charges may apply if True. Default of False. - include_landline: bool - Set to True to include appended landline phone number values for matched records. This is only applicable if your TargetSmart account is configued to return landline phone data. Additional charges may apply if True. Default of False. - include_wireless: bool - Set to True to include appended wireless phone number values for matched records. This is only applicable if your TargetSmart account is configued to return wireless phone data. Additional charges may apply if True. Default of False. - include_voip: bool - Set to True to include appended VOIP phone number values for matched records. This is only applicable if your TargetSmart account is configued to return VOIP phone data. Additional charges may apply if True. Default of False. - tmp_location: str - Optionally provide a local directory path where input/output CSV files will be stored. Useful to recover CSV output if downstream ETL processing fails. If not specified, a system tmp location is used. Default of None. 
- keep_smartmatch_input_file: bool - Optionally keep the CSV input file that is uploaded in ``tmp_location`` for later use. Default of False. - keep_smartmatch_output_gz_file: bool - Optionally keep the gzip compressed output file in ``tmp_location`` for later use. The uncompressed output file is always retained in ``tmp_location``. Default of False - `Returns:` - Parsons Table - A Parsons table wrapping the SmartMatch execution output file records. Each record will - include the input record fields followed by columns named ``tsmart_match_code``, a - match indicator, ``vb.voterbase_id``, and zero or more additional data - element fields based on your TargetSmart account configuration. - See :ref:`parsons-table` for output options. - """ # noqa - - # If `input_table` is a Parsons table, convert it to a Petl table. - if hasattr(input_table, "table"): - input_table = input_table.table - - url = self.connection.uri + "service/smartmatch" - poll_url = f"{url}/poll" - - if not input_table: - raise ValueError( - "Missing `input_table`. A Petl table must be provided with" - " valid input rows." - ) - - if not hasattr(input_table, "tocsv"): - raise ValueError("`input_table` isn't a valid table.") - - if int(max_matches) > 10: - raise ValueError("max_matches cannot be greater than 10") - - if not tmp_location: - tmp_location = tempfile.mkdtemp() - - logger.info("Preparing data for SmartMatch submission.") - input_table = _add_join_id(input_table) - dataprep_table = _prepare_input(input_table, tmp_location) - # Unique execution label for each submission - submit_filename = f"tmc_{str(uuid.uuid4())[0:10]}.csv" - - # An initial api.targetsmart.com request is performed to register the - # job execution. The response returns a presigned S3 url where data - # records will be uploaded. - response_1 = requests.get( - url, - { - "filename": submit_filename, - "include_email": include_email, - "include_landline": include_landline, - "include_wireless": include_wireless, - "include_voip": include_voip, - "max_matches": max_matches, - "format": "gzip", - }, - headers=self.connection.headers, - ) - response_1.raise_for_status() - response_1_info = response_1.json() - if response_1_info["error"]: - raise SmartMatchError( - "SmartMatch workflow registration failed. Error:" - f" {response_1_info['error']}" - ) - - logger.info( - "The SmartMatch workflow registration was successful for file name" - f" {submit_filename}." - ) - - # Write Petl table to CSV and upload for SmartMatch to process - with tempfile.NamedTemporaryFile( - mode="w+", - encoding="utf8", - newline="\n", - prefix="smartmatch_input", - suffix=".csv", - dir=tmp_location, - delete=not keep_smartmatch_input_file, - ) as tmp: - dataprep_table.tocsv(tmp.name, encoding="utf8") - tmp.flush() - _smartmatch_upload(response_1_info["url"], tmp.name) - - logger.info( - "The SmartMatch workflow execution has been submitted using file" - f" name '{submit_filename}'. Now polling for results which can take" - " minutes/hours depending on data size and queuing." - ) - - # Poll SmartMatch endpoint waiting for workflow completion - download_url = self._smartmatch_poll(poll_url, submit_filename) - - # Download SmartMatch .csv.gz results, decompress, and Petl table wrap. - # The final tmp file cannot be deleted due to Petl tables being lazy. 
- with tempfile.NamedTemporaryFile( - prefix="smartmatch_output", - suffix=".csv.gz", - dir=tmp_location, - delete=not keep_smartmatch_output_gz_file, - ) as tmp_gz: - with tempfile.NamedTemporaryFile( - prefix="smartmatch_output", - suffix=".csv", - dir=tmp_location, - delete=False, - ) as tmp_csv: - logger.info( - f"Downloading the '{submit_filename}' SmartMatch results to" - f" {tmp_gz.name}." - ) - _smartmatch_download(download_url, tmp_gz) - tmp_gz.flush() - - logger.info("Decompressing results") - with gzip.open(tmp_gz.name, "rb") as gz_reader: - shutil.copyfileobj(gz_reader, tmp_csv) - tmp_csv.flush() - - raw_outtable = petl.fromcsv( # pylint: disable=no-member - tmp_csv.name, encoding="utf8" - ).convert(INTERNAL_JOIN_ID, int) - logger.info( - "SmartMatch remote execution successful. Joining results to" - " input table." - ) - outtable = ( - petl.leftjoin( # pylint: disable=no-member - input_table, - raw_outtable, - key=INTERNAL_JOIN_ID, - tempdir=tmp_location, - ) - .sort(key=INTERNAL_JOIN_ID) - .cutout(INTERNAL_JOIN_ID) - ) - if INTERNAL_JOIN_ID_CONFLICT in input_table.fieldnames(): - input_table = input_table.rename( - INTERNAL_JOIN_ID_CONFLICT, INTERNAL_JOIN_ID - ) - - return Table(outtable) diff --git a/parsons/tools/credential_tools.py b/parsons/tools/credential_tools.py index 40d0374c8b..8a4b60f670 100644 --- a/parsons/tools/credential_tools.py +++ b/parsons/tools/credential_tools.py @@ -3,7 +3,7 @@ import json import os -PREFIX = "PRSNSENV" +PREFIX = 'PRSNSENV' def decode_credential(credential, save_path=None, export=True, echo=False): @@ -27,14 +27,13 @@ def decode_credential(credential, save_path=None, export=True, echo=False): if credential[:x] != PREFIX: raise ValueError("Invalid Parsons variable.") - decoded_str = b64decode(bytes(credential.replace(PREFIX, ""), "utf-8")).decode( - "utf-8" - ) + decoded_str = b64decode( + bytes(credential.replace(PREFIX, ""), "utf-8")).decode("utf-8") decoded_dict = json.loads(decoded_str) if save_path: - with open(save_path, "w") as f: + with open(save_path, 'w') as f: f.write(json.dumps(decoded_dict)) if export: @@ -75,7 +74,7 @@ def encode_from_json_file(credential_file): str The encoded credential. """ - with open(credential_file, "r") as f: + with open(credential_file, 'r') as f: data = json.load(f) json_str = json.dumps(data) @@ -120,45 +119,23 @@ def encode_from_dict(credential): return encoded_str -@click.command(options_metavar="[-e [-f] | -d [-xp] [-o ]]") -@click.argument("credential", metavar="credential") -@click.option( - "--encode", - "-e", - "fn", - flag_value="encode", - default=True, - help="Endcode a credential.", -) -@click.option( - "--decode", "-d", "fn", flag_value="decode", help="Decode an encoded credential." -) -@click.option( - "-f", - "is_file", - is_flag=True, - help=("Treat as a " "path to a file. Only valid with --encode."), -) -@click.option( - "-o", - "save_path", - default="", - metavar="", - help="The path for where to save the decoded credential.", -) -@click.option( - "-x", - "no_export", - is_flag=True, - default=False, - help=( - "Do not export the variable to the environment. Only " "valid with --decode." 
-    ),
-)
-@click.option(
-    "-s", "suppress", is_flag=True, default=False, help=("Suppress " "the output.")
-)
-def main(credential, fn, is_file=False, save_path="", no_export=False, suppress=False):
+@click.command(options_metavar='[-e [-f] | -d [-xp] [-o <path>]]')
+@click.argument('credential', metavar='credential')
+@click.option('--encode', '-e', 'fn', flag_value='encode',
+              default=True, help="Encode a credential.")
+@click.option('--decode', '-d', 'fn', flag_value='decode',
+              help='Decode an encoded credential.')
+@click.option('-f', 'is_file', is_flag=True, help=("Treat <credential> as a "
+                                                   "path to a file. Only valid with --encode."))
+@click.option('-o', 'save_path', default='', metavar='<path>',
+              help="The path for where to save the decoded credential.")
+@click.option('-x', 'no_export', is_flag=True, default=False,
+              help=("Do not export the variable to the environment. Only "
+                    "valid with --decode."))
+@click.option('-s', 'suppress', is_flag=True, default=False, help=("Suppress "
+                                                                   "the output."))
+def main(credential, fn, is_file=False, save_path="", no_export=False,
+         suppress=False):
     """A command line tool to encode and decode credentials.

     Use this tool when the credentials for a service are split into multiple
@@ -186,7 +163,7 @@ def main(credential, fn, is_file=False, save_path="", no_export=False, suppress=
     # Encoding a list of current environment variables.
     `python env_tools.py -e env_var1,env_var2,env_var3`
     """
-    if fn == "encode":
+    if fn == 'encode':
         if is_file:
             enc_cred = encode_from_json_file(credential)
         else:
             try:
                 cred = json.loads(credential)
                 enc_cred = encode_from_dict(cred)
             except json.decoder.JSONDecodeError:
-                cred = credential.split(",")
+                cred = credential.split(',')
                 enc_cred = encode_from_env(cred)

         if not suppress:
             print(enc_cred)

-    elif fn == "decode":
+    elif fn == 'decode':
         decode_credential(credential, save_path, not no_export, not suppress)

     else:
         raise ValueError("Invalid function selected. 
Use --help for help.") -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/parsons/turbovote/__init__.py b/parsons/turbovote/__init__.py index 6680069407..3baab83e48 100644 --- a/parsons/turbovote/__init__.py +++ b/parsons/turbovote/__init__.py @@ -1,3 +1,5 @@ from parsons.turbovote.turbovote import TurboVote -__all__ = ["TurboVote"] +__all__ = [ + 'TurboVote' +] diff --git a/parsons/turbovote/turbovote.py b/parsons/turbovote/turbovote.py index d9b4a80438..490e3dd709 100644 --- a/parsons/turbovote/turbovote.py +++ b/parsons/turbovote/turbovote.py @@ -5,7 +5,7 @@ logger = logging.getLogger(__name__) -TURBOVOTE_URI = "https://turbovote-admin-http-api.prod.democracy.works/" +TURBOVOTE_URI = 'https://turbovote-admin-http-api.prod.democracy.works/' class TurboVote(object): @@ -28,21 +28,22 @@ class TurboVote(object): def __init__(self, username=None, password=None, subdomain=None): - self.username = check_env.check("TURBOVOTE_USERNAME", username) - self.password = check_env.check("TURBOVOTE_PASSWORD", password) - self.subdomain = check_env.check("TURBOVOTE_SUBDOMAIN", subdomain) + self.username = check_env.check('TURBOVOTE_USERNAME', username) + self.password = check_env.check('TURBOVOTE_PASSWORD', password) + self.subdomain = check_env.check('TURBOVOTE_SUBDOMAIN', subdomain) self.uri = TURBOVOTE_URI def _get_token(self): # Retrieve a temporary bearer token to access API - url = self.uri + "login" - payload = {"username": self.username, "password": self.password} + url = self.uri + 'login' + payload = {'username': self.username, + 'password': self.password} r = requests.post(url, data=payload) logger.debug(r.url) r.raise_for_status() - return r.json()["id-token"] + return r.json()['id-token'] def get_users(self): """ @@ -53,13 +54,13 @@ def get_users(self): See :ref:`parsons-table` for output options. 
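An illustrative usage sketch for this method (assumes TURBOVOTE_USERNAME, TURBOVOTE_PASSWORD, and TURBOVOTE_SUBDOMAIN are set in the environment):

from parsons import TurboVote

tv = TurboVote()
users_tbl = tv.get_users()
users_tbl.to_csv('turbovote_users.csv')  # or any other parsons-table output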
""" - url = self.uri + f"partners/{self.subdomain}.turbovote.org/users" + url = self.uri + f'partners/{self.subdomain}.turbovote.org/users' headers = {"Authorization": f"Bearer {self._get_token()}"} r = requests.get(url, headers=headers) logger.debug(r) r.raise_for_status() tbl = Table.from_csv_string(r.text) - logger.info(f"{tbl.num_rows} users retrieved.") + logger.info(f'{tbl.num_rows} users retrieved.') return tbl diff --git a/parsons/twilio/__init__.py b/parsons/twilio/__init__.py index 210277bfaa..dcbe18b269 100644 --- a/parsons/twilio/__init__.py +++ b/parsons/twilio/__init__.py @@ -1,3 +1,5 @@ from parsons.twilio.twilio import Twilio -__all__ = ["Twilio"] +__all__ = [ + 'Twilio' +] diff --git a/parsons/twilio/twilio.py b/parsons/twilio/twilio.py index 204f1981ea..38e82888fc 100644 --- a/parsons/twilio/twilio.py +++ b/parsons/twilio/twilio.py @@ -24,15 +24,15 @@ class Twilio: def __init__(self, account_sid=None, auth_token=None): - self.account_sid = check_env.check("TWILIO_ACCOUNT_SID", account_sid) - self.auth_token = check_env.check("TWILIO_AUTH_TOKEN", auth_token) + self.account_sid = check_env.check('TWILIO_ACCOUNT_SID', account_sid) + self.auth_token = check_env.check('TWILIO_AUTH_TOKEN', auth_token) self.client = Client(self.account_sid, self.auth_token) def _table_convert(self, obj): - tbl = Table([x.__dict__["_properties"] for x in obj]) + tbl = Table([x.__dict__['_properties'] for x in obj]) - if "subresource_uris" in tbl.columns and "uri" in tbl.columns: - tbl.remove_column("subresource_uris", "uri") + if 'subresource_uris' in tbl.columns and 'uri' in tbl.columns: + tbl.remove_column('subresource_uris', 'uri') return tbl @@ -48,7 +48,7 @@ def get_account(self, account_sid): """ r = self.client.api.accounts(account_sid) - logger.info(f"Retrieved {account_sid} account.") + logger.info(f'Retrieved {account_sid} account.') return r.__dict__ def get_accounts(self, name=None, status=None): @@ -68,18 +68,11 @@ def get_accounts(self, name=None, status=None): r = self.client.api.accounts.list(friendly_name=name, status=status) tbl = self._table_convert(r) - logger.info(f"Retrieved {tbl.num_rows} accounts.") + logger.info(f'Retrieved {tbl.num_rows} accounts.') return tbl - def get_account_usage( - self, - category=None, - start_date=None, - end_date=None, - time_period=None, - group_by=None, - exclude_null=False, - ): + def get_account_usage(self, category=None, start_date=None, end_date=None, time_period=None, + group_by=None, exclude_null=False): """ Get Twilio account usage. @@ -102,28 +95,30 @@ def get_account_usage( `Returns:` Parsons Table See :ref:`parsons-table` for output options. 
- """ # noqa: E501,E261 + """ # noqa: E501,E261 # Add populated arguments - args = {"category": category, "start_date": start_date, "end_date": end_date} + args = {'category': category, + 'start_date': start_date, + 'end_date': end_date} args = json_format.remove_empty_keys(args) # Parse out the time_periods - if time_period == "today": + if time_period == 'today': r = self.client.usage.records.today.list(**args) - elif time_period == "yesterday": + elif time_period == 'yesterday': r = self.client.usage.records.yesterday.list(**args) - elif time_period == "this_month": + elif time_period == 'this_month': r = self.client.usage.records.this_month.list(**args) - elif time_period == "last_month": + elif time_period == 'last_month': r = self.client.usage.records.last_month.list(**args) # Parse out the group by - elif group_by == "daily": + elif group_by == 'daily': r = self.client.usage.records.daily.list(**args) - elif group_by == "monthly": + elif group_by == 'monthly': r = self.client.usage.records.monthly.list(**args) - elif group_by == "yearly": + elif group_by == 'yearly': r = self.client.usage.records.yearly.list(**args) else: r = self.client.usage.records.list(**args) @@ -131,18 +126,12 @@ def get_account_usage( tbl = self._table_convert(r) if exclude_null: - tbl.remove_null_rows("count", null_value="0") + tbl.remove_null_rows('count', null_value='0') return tbl - def get_messages( - self, - to=None, - from_=None, - date_sent=None, - date_sent_before=None, - date_sent_after=None, - ): + def get_messages(self, to=None, from_=None, date_sent=None, date_sent_before=None, + date_sent_after=None): """ Get Twilio messages. @@ -162,14 +151,10 @@ def get_messages( See :ref:`parsons-table` for output options. """ - r = self.client.messages.list( - to=to, - from_=from_, - date_sent=date_sent, - date_sent_before=date_sent_before, - date_sent_after=date_sent_after, - ) + r = self.client.messages.list(to=to, from_=from_, date_sent=date_sent, + date_sent_before=date_sent_before, + date_sent_after=date_sent_after) tbl = self._table_convert(r) - logger.info(f"Retrieved {tbl.num_rows} messages.") + logger.info(f'Retrieved {tbl.num_rows} messages.') return tbl diff --git a/parsons/utilities/api_connector.py b/parsons/utilities/api_connector.py index 5c54106cbe..2af6af5208 100644 --- a/parsons/utilities/api_connector.py +++ b/parsons/utilities/api_connector.py @@ -32,12 +32,11 @@ class APIConnector(object): APIConnector class """ - def __init__( - self, uri, headers=None, auth=None, pagination_key=None, data_key=None - ): + def __init__(self, uri, headers=None, auth=None, pagination_key=None, data_key=None): + # Add a trailing slash if its missing - if not uri.endswith("/"): - uri = uri + "/" + if not uri.endswith('/'): + uri = uri + '/' self.uri = uri self.headers = headers @@ -73,15 +72,8 @@ def request(self, url, req_type, json=None, data=None, params=None): """ full_url = urllib.parse.urljoin(self.uri, url) - return _request( - req_type, - full_url, - headers=self.headers, - auth=self.auth, - json=json, - data=data, - params=params, - ) + return _request(req_type, full_url, headers=self.headers, auth=self.auth, json=json, + data=data, params=params) def get_request(self, url, params=None): """ @@ -96,15 +88,14 @@ def get_request(self, url, params=None): A requests response object """ - r = self.request(url, "GET", params=params) + r = self.request(url, 'GET', params=params) self.validate_response(r) logger.debug(r.json()) return r.json() - def post_request( - self, url, params=None, data=None, 
json=None, success_codes=[200, 201, 202, 204] - ): + def post_request(self, url, params=None, data=None, json=None, + success_codes=[200, 201, 202, 204]): """ Make a POST request. @@ -123,7 +114,7 @@ def post_request( A requests response object """ - r = self.request(url, "POST", params=params, data=data, json=json) + r = self.request(url, 'POST', params=params, data=data, json=json) # Validate the response and lift up an errors. self.validate_response(r) @@ -151,7 +142,7 @@ def delete_request(self, url, params=None, success_codes=[200, 201, 204]): A requests response object or status code """ - r = self.request(url, "DELETE", params=params) + r = self.request(url, 'DELETE', params=params) self.validate_response(r) @@ -163,9 +154,7 @@ def delete_request(self, url, params=None, success_codes=[200, 201, 204]): else: return r.status_code - def put_request( - self, url, data=None, json=None, params=None, success_codes=[200, 201, 204] - ): + def put_request(self, url, data=None, json=None, params=None, success_codes=[200, 201, 204]): """ Make a PUT request. @@ -182,7 +171,7 @@ def put_request( A requests response object """ - r = self.request(url, "PUT", params=params, data=data, json=json) + r = self.request(url, 'PUT', params=params, data=data, json=json) self.validate_response(r) @@ -192,9 +181,7 @@ def put_request( else: return r.status_code - def patch_request( - self, url, params=None, data=None, json=None, success_codes=[200, 201, 204] - ): + def patch_request(self, url, params=None, data=None, json=None, success_codes=[200, 201, 204]): """ Make a PATCH request. @@ -213,7 +200,7 @@ def patch_request( A requests response object """ - r = self.request(url, "PATCH", params=params, data=data, json=json) + r = self.request(url, 'PATCH', params=params, data=data, json=json) self.validate_response(r) @@ -236,14 +223,15 @@ def validate_response(self, resp): """ if resp.status_code >= 400: + if resp.reason: - message = f"HTTP error occurred ({resp.status_code}): {resp.reason}" + message = f'HTTP error occurred ({resp.status_code}): {resp.reason}' else: - message = f"HTTP error occurred ({resp.status_code})" + message = f'HTTP error occurred ({resp.status_code})' # Some errors return JSONs with useful info about the error. Return it if exists. if self.json_check(resp): - raise HTTPError(f"{message}, json: {resp.json()}") + raise HTTPError(f'{message}, json: {resp.json()}') else: raise HTTPError(message) @@ -261,7 +249,7 @@ def data_parse(self, resp): A dictionary of data. """ - # TODO: Some response jsons are enclosed in a list. Need to deal with unpacking and/or + # To Do: Some response jsons are enclosed in a list. Need to deal with unpacking and/or # not assuming that it is going to be a dict. # In some instances responses are just lists. diff --git a/parsons/utilities/check_env.py b/parsons/utilities/check_env.py index b175ed8dd1..b6b27b9b4f 100644 --- a/parsons/utilities/check_env.py +++ b/parsons/utilities/check_env.py @@ -12,8 +12,6 @@ def check(env, field, optional=False): return os.environ[env] except KeyError: if not optional: - raise KeyError( - f"No {env} found. Store as environment variable or " - f"pass as an argument." - ) + raise KeyError(f'No {env} found. 
Store as environment variable or ' + f'pass as an argument.') return field diff --git a/parsons/utilities/cloud_storage.py b/parsons/utilities/cloud_storage.py index 83e829185c..79cf2a5280 100644 --- a/parsons/utilities/cloud_storage.py +++ b/parsons/utilities/cloud_storage.py @@ -8,9 +8,7 @@ """ -def post_file( - tbl, type, file_path=None, quoting=csv.QUOTE_MINIMAL, **file_storage_args -): +def post_file(tbl, type, file_path=None, quoting=csv.QUOTE_MINIMAL, **file_storage_args): """ This utility method is a generalizable method for moving files to an online file storage class. It is used by methods that require access @@ -34,22 +32,19 @@ def post_file( ``None`` """ - if type.upper() == "S3": + if type.upper() == 'S3': # Overwrite the file_path if key is passed - if "key" in file_storage_args: - file_storage_args["key"] = file_path + if 'key' in file_storage_args: + file_storage_args['key'] = file_path - return tbl.to_s3_csv( - public_url=True, key=file_path, quoting=quoting, **file_storage_args - ) + return tbl.to_s3_csv(public_url=True, key=file_path, quoting=quoting, **file_storage_args) - elif type.upper() == "GCS": + elif type.upper() == 'GCS': - return tbl.to_gcs_csv( - public_url=True, blob_name=file_path, quoting=quoting, **file_storage_args - ) + return tbl.to_gcs_csv(public_url=True, blob_name=file_path, quoting=quoting, + **file_storage_args) else: - raise ValueError("Type must be S3 or GCS.") + raise ValueError('Type must be S3 or GCS.') diff --git a/parsons/utilities/datetime.py b/parsons/utilities/datetime.py index a3ace88700..dcc9fff1a8 100644 --- a/parsons/utilities/datetime.py +++ b/parsons/utilities/datetime.py @@ -52,9 +52,8 @@ def parse_date(value, tzinfo=datetime.timezone.utc): parsed = parse(value) else: raise TypeError( - "Unable to parse value; must be one of string or int or datetime, but got type " - f"{type(value)}" - ) + 'Unable to parse value; must be one of string or int or datetime, but got type ' + f'{type(value)}') if not parsed.tzinfo: parsed = parsed.replace(tzinfo=tzinfo) diff --git a/parsons/utilities/files.py b/parsons/utilities/files.py index 5e44e44558..3c38f1e678 100644 --- a/parsons/utilities/files.py +++ b/parsons/utilities/files.py @@ -5,13 +5,13 @@ import tempfile __all__ = [ - "create_temp_file", - "create_temp_file_for_path", - "is_gzip_path", - "suffix_for_compression_type", - "compression_type_for_path", - "string_to_temp_file", -] + 'create_temp_file', + 'create_temp_file_for_path', + 'is_gzip_path', + 'suffix_for_compression_type', + 'compression_type_for_path', + 'string_to_temp_file' + ] # Maximum number of times to try to open a new temp file before giving up. @@ -77,7 +77,7 @@ def create_temp_file_for_path(path): # Add the appropriate compression suffix to the file, so other libraries that check the # file's extension will know that it is compressed. # TODO Make this more robust, maybe even using the entire remote file name as the suffix. 
- suffix = ".gz" if is_gzip_path(path) else None + suffix = '.gz' if is_gzip_path(path) else None return create_temp_file(suffix=suffix) @@ -153,30 +153,30 @@ def track_temp_file(path): def is_gzip_path(path): - return path[-3:] == ".gz" + return (path[-3:] == '.gz') def is_zip_path(path): - return path[-4:] == ".zip" + return (path[-4:] == '.zip') def is_csv_path(path): - return path[-4:].lower() == ".csv" + return (path[-4:].lower() == '.csv') def suffix_for_compression_type(compression): - if compression == "gzip": - return ".gz" + if compression == 'gzip': + return '.gz' - return "" + return '' def compression_type_for_path(path): if is_gzip_path(path): - return "gzip" + return 'gzip' if is_zip_path(path): - return "zip" + return 'zip' return None @@ -204,10 +204,10 @@ def read_file(path): compression = compression_type_for_path(path) open_func = { - "gzip": gzip.open, + 'gzip': gzip.open, None: open, } - with open_func[compression](path, "r") as fp: + with open_func[compression](path, 'r') as fp: return fp.read() @@ -219,7 +219,7 @@ def string_to_temp_file(string, suffix=None): temp_file_path = create_temp_file(suffix=suffix) - with open(temp_file_path, "w") as f: + with open(temp_file_path, 'w') as f: f.write(string) return temp_file_path @@ -232,10 +232,10 @@ def zip_check(file_path, compression_type): """ if file_path: - if file_path.split("/")[-1].split(".")[-1] == "zip": + if file_path.split('/')[-1].split('.')[-1] == 'zip': return True - if compression_type == "zip": + if compression_type == 'zip': return True else: @@ -257,9 +257,9 @@ def extract_file_name(file_path=None, include_suffix=True): return None if include_suffix: - return file_path.split("/")[-1] + return file_path.split('/')[-1] - return file_path.split("/")[-1].split(".")[0] + return file_path.split('/')[-1].split('.')[0] def has_data(file_path): @@ -303,7 +303,7 @@ def generate_tempfile(suffix=None, create=False): for _ in range(TMP_MAX): name = next(names) if suffix: - name = f"{name}{suffix}" + name = f'{name}{suffix}' path = os.path.join(temp_dir, name) # Check to see if the path already exists. @@ -319,14 +319,15 @@ def generate_tempfile(suffix=None, create=False): # open it in read mode later, they won't get an error about the file not existing. 
# Also, use mode='x' (exclusive create) to make sure we get an error if the file already # exists - with open(path, mode="x") as _: + with open(path, mode='x') as _: pass return path # PermissionError can be Windows' way of saying the file exists except (FileExistsError, PermissionError): - continue # try again with another filename if we got an error + continue # try again with another filename if we got an error - raise FileExistsError(errno.EEXIST, "No usable temporary directory name found") + raise FileExistsError(errno.EEXIST, + "No usable temporary directory name found") class TempDirectory: diff --git a/parsons/utilities/json_format.py b/parsons/utilities/json_format.py index e15aaaebd3..1a9a367e89 100644 --- a/parsons/utilities/json_format.py +++ b/parsons/utilities/json_format.py @@ -1,3 +1,4 @@ + def arg_format(arg): """ Many APIs require arguments to formatted like this 'thisTypeConfig' which is not the standard @@ -5,11 +6,11 @@ def arg_format(arg): 'thisTypeConfig' """ - arg_list = arg.split("_") + arg_list = arg.split('_') arg_list = [a.capitalize() for a in arg_list] arg_list[0] = arg_list[0].lower() - return "".join(arg_list) + return ''.join(arg_list) def remove_empty_keys(dirty_dict): @@ -35,14 +36,14 @@ def flatten_json(json): """ out = {} - def flatten(x, name=""): + def flatten(x, name=''): if type(x) is dict: for k, v in x.items(): flatten(v, k) elif type(x) is list: for a in x: flatten(a) - elif name != "" and name not in out: + elif name != '' and name not in out: out[name] = x flatten(json) diff --git a/parsons/utilities/oauth_api_connector.py b/parsons/utilities/oauth_api_connector.py index b8356e3813..8008fc0a1a 100644 --- a/parsons/utilities/oauth_api_connector.py +++ b/parsons/utilities/oauth_api_connector.py @@ -36,35 +36,20 @@ class OAuth2APIConnector(APIConnector): """ def __init__( - self, - uri, - headers=None, - auth=None, - pagination_key=None, - data_key=None, - client_id=None, - client_secret=None, - token_url=None, - auto_refresh_url=None, + self, uri, headers=None, auth=None, pagination_key=None, data_key=None, + client_id=None, client_secret=None, token_url=None, auto_refresh_url=None ): super().__init__( - uri, - headers=headers, - auth=auth, - pagination_key=pagination_key, - data_key=data_key, + uri, headers=headers, auth=auth, pagination_key=pagination_key, data_key=data_key ) client = BackendApplicationClient(client_id=client_id) oauth = OAuth2Session(client=client) - self.token = oauth.fetch_token( - token_url=token_url, client_id=client_id, client_secret=client_secret - ) + self.token = oauth.fetch_token(token_url=token_url, + client_id=client_id, client_secret=client_secret) self.client = OAuth2Session( - client_id, - token=self.token, - auto_refresh_url=auto_refresh_url, - token_updater=self.token_saver, + client_id, token=self.token, auto_refresh_url=auto_refresh_url, + token_updater=self.token_saver ) def request(self, url, req_type, json=None, data=None, params=None): @@ -91,13 +76,8 @@ def request(self, url, req_type, json=None, data=None, params=None): """ full_url = urllib.parse.urljoin(self.uri, url) return self.client.request( - req_type, - full_url, - headers=self.headers, - auth=self.auth, - json=json, - data=data, - params=params, + req_type, full_url, headers=self.headers, auth=self.auth, + json=json, data=data, params=params ) def token_saver(self, token): diff --git a/parsons/utilities/sql_helpers.py b/parsons/utilities/sql_helpers.py index fe3753c971..ac32ca811e 100644 --- a/parsons/utilities/sql_helpers.py +++ 
b/parsons/utilities/sql_helpers.py @@ -1,6 +1,6 @@ import re -__all__ = ["redact_credentials"] +__all__ = ['redact_credentials'] def redact_credentials(sql): @@ -9,6 +9,6 @@ def redact_credentials(sql): """ pattern = "credentials\s+'(.+\n?)+[^(\\)]'" # noqa: W605 - sql_censored = re.sub(pattern, "CREDENTIALS REDACTED", sql, flags=re.IGNORECASE) + sql_censored = re.sub(pattern, 'CREDENTIALS REDACTED', sql, flags=re.IGNORECASE) return sql_censored diff --git a/parsons/utilities/zip_archive.py b/parsons/utilities/zip_archive.py index 33179e9c32..a7de6a9122 100644 --- a/parsons/utilities/zip_archive.py +++ b/parsons/utilities/zip_archive.py @@ -3,7 +3,7 @@ from parsons.utilities.files import create_temp_directory -def create_archive(archive_path, file_path, file_name=None, if_exists="replace"): +def create_archive(archive_path, file_path, file_name=None, if_exists='replace'): """ Create and fill an archive. @@ -20,13 +20,13 @@ def create_archive(archive_path, file_path, file_name=None, if_exists="replace") Zip archive path """ - if if_exists == "append": - write_type = "a" + if if_exists == 'append': + write_type = 'a' else: - write_type = "w" + write_type = 'w' if not file_name: - file_name = file_path.split("/")[-1] + file_name = file_path.split('/')[-1] with zipfile.ZipFile(archive_path, write_type) as z: z.write(file_path, arcname=file_name, compress_type=zipfile.ZIP_STORED) @@ -49,7 +49,7 @@ def unzip_archive(archive_path, destination=None): """ destination = destination or create_temp_directory() - with zipfile.ZipFile(archive_path, "r") as z: + with zipfile.ZipFile(archive_path, 'r') as z: file_name = z.namelist()[0] z.extractall(path=destination) return os.path.join(destination, file_name) diff --git a/parsons/zoom/__init__.py b/parsons/zoom/__init__.py index b59d76a965..f439ab8943 100644 --- a/parsons/zoom/__init__.py +++ b/parsons/zoom/__init__.py @@ -1,3 +1,5 @@ from parsons.zoom.zoom import Zoom -__all__ = ["Zoom"] +__all__ = [ + 'Zoom' +] diff --git a/parsons/zoom/zoom.py b/parsons/zoom/zoom.py index 095e097aaa..7fd89748e7 100644 --- a/parsons/zoom/zoom.py +++ b/parsons/zoom/zoom.py @@ -7,10 +7,7 @@ logger = logging.getLogger(__name__) -ZOOM_URI = "https://api.zoom.us/v2/" -ZOOM_AUTH_CALLBACK = "https://zoom.us/oauth/token" - -########## +ZOOM_URI = 'https://api.zoom.us/v2/' class Zoom: @@ -26,77 +23,25 @@ class Zoom: variable set. 
""" - def __init__(self, account_id=None, client_id=None, client_secret=None): - self.account_id = check_env.check("ZOOM_ACCOUNT_ID", account_id) - self.client_id = check_env.check("ZOOM_CLIENT_ID", client_id) - self.__client_secret = check_env.check("ZOOM_CLIENT_SECRET", client_secret) - - self.client = APIConnector(uri=ZOOM_URI) - - access_token = self.__generate_access_token() - - self.client.headers = { - "Authorization": f"Bearer {access_token}", - "Content-type": "application/json", - } - - def __generate_access_token(self) -> str: - """ - Uses Zoom's OAuth callback URL to generate an access token to query the Zoom API - - `Returns`: - String representation of access token - """ + def __init__(self, api_key=None, api_secret=None): - temp_client = APIConnector( - uri=ZOOM_URI, auth=(self.client_id, self.__client_secret) - ) + self.api_key = check_env.check('ZOOM_API_KEY', api_key) + self.api_secret = check_env.check('ZOOM_API_SECRET', api_secret) + self.client = APIConnector(ZOOM_URI) - resp = temp_client.post_request( - ZOOM_AUTH_CALLBACK, - data={ - "grant_type": "account_credentials", - "account_id": self.account_id, - }, - ) + def refresh_header_token(self): + # Generate a token that is valid for 30 seconds and update header. Full documentation + # on JWT generation using Zoom API: https://marketplace.zoom.us/docs/guides/auth/jwt - return resp["access_token"] - - def __refresh_header_token(self): - """ - NOTE: This function is deprecated as Zoom's API moves to an OAuth strategy on 9/1 - - Generate a token that is valid for 30 seconds and update header. Full documentation - on JWT generation using Zoom API: https://marketplace.zoom.us/docs/guides/auth/jwt - """ - - payload = { - "iss": self.api_key, - "exp": int(datetime.datetime.now().timestamp() + 30), - } - token = jwt.encode(payload, self.api_secret, algorithm="HS256") - self.client.headers = { - "authorization": f"Bearer {token}", - "content-type": "application/json", - } + payload = {"iss": self.api_key, "exp": int(datetime.datetime.now().timestamp() + 30)} + token = jwt.encode(payload, self.api_secret, algorithm='HS256') + self.client.headers = {'authorization': f"Bearer {token}", + 'content-type': "application/json"} def _get_request(self, endpoint, data_key, params=None, **kwargs): - """ - TODO: Consider increasing default page size. - - `Args`: - endpoint: str - API endpoint to send GET request - data_key: str - Unique value to use to parse through nested data - (akin to a primary key in response JSON) - params: dict - Additional request parameters, defaults to None - - `Returns`: - Parsons Table of API responses - """ + # To Do: Consider increasing default page size. + self.refresh_header_token() r = self.client.get_request(endpoint, params=params, **kwargs) self.client.data_key = data_key data = self.client.data_parse(r) @@ -105,7 +50,7 @@ def _get_request(self, endpoint, data_key, params=None, **kwargs): params = {} # Return a dict or table if only one item. 
- if "page_number" not in r.keys(): + if 'page_number' not in r.keys(): if isinstance(data, dict): return data if isinstance(data, list): @@ -113,13 +58,13 @@ def _get_request(self, endpoint, data_key, params=None, **kwargs): # Else iterate through the pages and return a Table else: - while r["page_number"] < r["page_count"]: - params["page_number"] = int(r["page_number"]) + 1 + while r['page_number'] < r['page_count']: + params['page_number'] = int(r['page_number']) + 1 r = self.client.get_request(endpoint, params=params, **kwargs) data.extend(self.client.data_parse(r)) return Table(data) - def get_users(self, status="active", role_id=None): + def get_users(self, status='active', role_id=None): """ Get users. @@ -134,16 +79,17 @@ def get_users(self, status="active", role_id=None): See :ref:`parsons-table` for output options. """ - if status not in ["active", "inactive", "pending"]: - raise ValueError("Invalid status type provided.") + if status not in ['active', 'inactive', 'pending']: + raise ValueError('Invalid status type provided.') - params = {"status": status, "role_id": role_id} + params = {'status': status, + 'role_id': role_id} - tbl = self._get_request("users", "users", params=params) - logger.info(f"Retrieved {tbl.num_rows} users.") + tbl = self._get_request('users', 'users', params=params) + logger.info(f'Retrieved {tbl.num_rows} users.') return tbl - def get_meetings(self, user_id, meeting_type="scheduled"): + def get_meetings(self, user_id, meeting_type='scheduled'): """ Get meetings scheduled by a user. @@ -172,8 +118,8 @@ def get_meetings(self, user_id, meeting_type="scheduled"): See :ref:`parsons-table` for output options. """ - tbl = self._get_request(f"users/{user_id}/meetings", "meetings") - logger.info(f"Retrieved {tbl.num_rows} meetings.") + tbl = self._get_request(f'users/{user_id}/meetings', 'meetings') + logger.info(f'Retrieved {tbl.num_rows} meetings.') return tbl def get_past_meeting(self, meeting_uuid): @@ -188,8 +134,8 @@ def get_past_meeting(self, meeting_uuid): See :ref:`parsons-table` for output options. """ - tbl = self._get_request(f"past_meetings/{meeting_uuid}", None) - logger.info(f"Retrieved meeting {meeting_uuid}.") + tbl = self._get_request(f'past_meetings/{meeting_uuid}', None) + logger.info(f'Retrieved meeting {meeting_uuid}.') return tbl def get_past_meeting_participants(self, meeting_id): @@ -204,10 +150,8 @@ def get_past_meeting_participants(self, meeting_id): See :ref:`parsons-table` for output options. """ - tbl = self._get_request( - f"report/meetings/{meeting_id}/participants", "participants" - ) - logger.info(f"Retrieved {tbl.num_rows} participants.") + tbl = self._get_request(f'report/meetings/{meeting_id}/participants', 'participants') + logger.info(f'Retrieved {tbl.num_rows} participants.') return tbl def get_meeting_registrants(self, meeting_id): @@ -222,8 +166,8 @@ def get_meeting_registrants(self, meeting_id): See :ref:`parsons-table` for output options. """ - tbl = self._get_request(f"meetings/{meeting_id}/registrants", "registrants") - logger.info(f"Retrieved {tbl.num_rows} registrants.") + tbl = self._get_request(f'meetings/{meeting_id}/registrants', 'registrants') + logger.info(f'Retrieved {tbl.num_rows} registrants.') return tbl def get_user_webinars(self, user_id): @@ -238,8 +182,8 @@ def get_user_webinars(self, user_id): See :ref:`parsons-table` for output options. 
""" - tbl = self._get_request(f"users/{user_id}/webinars", "webinars") - logger.info(f"Retrieved {tbl.num_rows} webinars.") + tbl = self._get_request(f'users/{user_id}/webinars', 'webinars') + logger.info(f'Retrieved {tbl.num_rows} webinars.') return tbl def get_past_webinar_participants(self, webinar_id): @@ -254,10 +198,8 @@ def get_past_webinar_participants(self, webinar_id): See :ref:`parsons-table` for output options. """ - tbl = self._get_request( - f"report/webinars/{webinar_id}/participants", "participants" - ) - logger.info(f"Retrieved {tbl.num_rows} webinar participants.") + tbl = self._get_request(f'report/webinars/{webinar_id}/participants', 'participants') + logger.info(f'Retrieved {tbl.num_rows} webinar participants.') return tbl def get_webinar_registrants(self, webinar_id): @@ -272,6 +214,6 @@ def get_webinar_registrants(self, webinar_id): See :ref:`parsons-table` for output options. """ - tbl = self._get_request(f"webinars/{webinar_id}/registrants", "registrants") - logger.info(f"Retrieved {tbl.num_rows} webinar registrants.") + tbl = self._get_request(f'webinars/{webinar_id}/registrants', 'registrants') + logger.info(f'Retrieved {tbl.num_rows} webinar registrants.') return tbl diff --git a/requirements.txt b/requirements.txt index da855fcc60..c8bbdda9a9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,32 +1,30 @@ -requests==2.31.0 +requests==2.25.1 petl==1.6.8 boto3==1.17.98 boxsdk==2.10.0 -civis==1.16.0 +civis==1.14.2 slackclient==1.3.0 -psycopg2-binary==2.9.3 +psycopg2-binary==2.8.5 xmltodict==0.11.0 gspread==3.7.0 oauth2client==4.1.3 -google-auth==2.6.2 -facebook-business==13.0.0 +facebook-business==6.0.0 google-api-python-client==1.7.7 -google-resumable-media==2.3.2 -grpcio==1.53.0 +google-resumable-media!=0.4.0,<0.5.0dev,>=0.3.1 httplib2==0.19.0 validate-email==1.3 -paramiko==2.11.0 +paramiko==2.7.2 xmltodict==0.11.0 -joblib==1.2.0 +joblib==0.14.1 censusgeocode==0.4.3.post1 airtable-python-wrapper==0.13.0 -google-cloud-storage==2.2.0 -google-cloud-bigquery==3.4.0 +google-cloud-storage==1.17.0 +google-cloud-bigquery==1.21.0 docutils<0.18,>=0.14 urllib3==1.26.5 simplejson==3.16.0 -twilio==8.2.1 -simple-salesforce==1.11.6 +twilio==6.30.0 +simple-salesforce==0.74.3 suds-py3==1.3.4.0 newmode==0.1.6 mysql-connector-python==8.0.18 @@ -35,7 +33,7 @@ python-dateutil==2.8.1 azure-storage-blob==12.3.2 PyGitHub==1.51 surveygizmo==1.2.3 -PyJWT==2.4.0 # Otherwise `import jwt` would refer to python-jwt package +PyJWT==2.0.1 # Otherwise `import jwt` would refer to python-jwt package SQLAlchemy==1.3.23 requests_oauthlib==1.3.0 bs4==0.0.1 @@ -43,9 +41,8 @@ bs4==0.0.1 # Testing Requirements requests-mock==1.5.2 flake8==4.0.1 -black==22.12.0 -testfixtures==6.18.5 -pytest==7.1.1 +testfixtures==6.4.3 +pytest==5.2.0 pytest-datadir==1.3.0 # Stuff for TMC scripts diff --git a/setup.py b/setup.py index 7d4bd5c462..1d0219fdcd 100644 --- a/setup.py +++ b/setup.py @@ -2,80 +2,29 @@ from setuptools import find_packages from distutils.core import setup +THIS_DIR = os.path.abspath(os.path.dirname(__file__)) + def main(): - limited_deps = os.environ.get("PARSONS_LIMITED_DEPENDENCIES", "") - if limited_deps.strip().upper() in ("1", "YES", "TRUE", "ON"): - install_requires = [ - "petl", - "python-dateutil", - "requests", - "requests_oauthlib", - "simplejson", - ] - extras_require = { - "airtable": ["airtable-python-wrapper"], - "alchemer": ["surveygizmo"], - "azure": ["azure-storage-blob"], - "box": ["boxsdk"], - "braintree": ["braintree"], - "civis": ["civis"], - "facebook": ["joblib", 
"facebook-business"], - "geocode": ["censusgeocode"], - "github": ["PyGitHub"], - "google": [ - "apiclient", - "google-api-python-client", - "google-cloud-bigquery", - "google-cloud-storage", - "gspread", - "httplib2", - "oauth2client", - "validate-email", - ], - "mysql": ["mysql-connector-python", "SQLAlchemy"], - "newmode": ["newmode"], - "ngpvan": ["suds-py3"], - "postgres": ["psycopg2-binary", "SQLAlchemy"], - "redshift": ["boto3", "psycopg2-binary", "SQLAlchemy"], - "s3": ["boto3"], - "salesforce": ["simple-salesforce"], - "sftp": ["paramiko"], - "slack": ["slackclient<2"], - "smtp": ["validate-email"], - "targetsmart": ["xmltodict"], - "twilio": ["twilio"], - "zoom": ["PyJWT"], - } - extras_require["all"] = sorted( - {lib for libs in extras_require.values() for lib in libs} - ) - else: - THIS_DIR = os.path.abspath(os.path.dirname(__file__)) - with open(os.path.join(THIS_DIR, "requirements.txt")) as reqs: - install_requires = reqs.read().strip().split("\n") - # No op for forward-compatibility - extras_require = {"all": []} + with open(os.path.join(THIS_DIR, 'requirements.txt')) as reqs: + requirements = reqs.read().strip().split('\n') setup( name="parsons", - version="2.1.0", + version='0.18.0', author="The Movement Cooperative", author_email="info@movementcooperative.org", - url="https://github.com/move-coop/parsons", - keywords=["PROGRESSIVE", "API", "ETL"], + url='https://github.com/movementcoop/parsons', + keywords=['PROGRESSIVE', 'API', 'ETL'], packages=find_packages(), - install_requires=install_requires, - extras_require=extras_require, + install_requires=requirements, classifiers=[ - "Development Status :: 3 - Alpha", - "Intended Audience :: Developers", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - ], - python_requires=">=3.7.0,<3.11.0", + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8' + ] ) diff --git a/test/fixtures.py b/test/fixtures.py index 346639dd13..aebb95698d 100644 --- a/test/fixtures.py +++ b/test/fixtures.py @@ -2,25 +2,25 @@ from parsons.etl import Table -""" +''' Simple Table The bare minimum Parsons table, and matching files representing that table. -""" +''' @pytest.fixture def simple_table(): # Note - If you modify this table, you must also update the related "simple" files. 
# Fortunately Parsons should make that easy to do :) - return Table([{"first": "Bob", "last": "Smith"}]) + return Table([{'first': 'Bob', 'last': 'Smith'}]) @pytest.fixture def simple_csv_path(shared_datadir): - return str(shared_datadir / "test-simple-table.csv") + return str(shared_datadir / 'test-simple-table.csv') @pytest.fixture def simple_compressed_csv_path(shared_datadir): - return str(shared_datadir / "test-simple-table.csv.gz") + return str(shared_datadir / 'test-simple-table.csv.gz') diff --git a/test/responses/ts_responses.py b/test/responses/ts_responses.py index 25ef06376f..0a998733e8 100644 --- a/test/responses/ts_responses.py +++ b/test/responses/ts_responses.py @@ -1,363 +1,205 @@ + # TargetSmart Test response # #### District Endpoint #### district_match = { - "vb.vf_reg_cass_state": "IL", - "vb.vf_reg_cass_zip": "60622", - "vb.reg_cass_zip4": "7194", - "vb.vf_precinct_id": "36", - "vb.vf_precinct_name": "CITY OF CHICAGO W-26 P-36", - "vb.vf_ward": "26", - "vb.vf_township": "", - "vb.vf_county_code": "031", - "vb.vf_county_name": "CITY OF CHICAGO", - "vb.vf_sd": "002", - "vb.vf_hd": "004", - "vb.vf_cd": "007", - "vb.vf_city_council": "", - "vb.vf_municipal_district": "CHICAGO", - "vb.vf_county_council": "08", - "vb.vf_judicial_district": "CHICAGO", - "vb.vf_school_district": "CHICAGO PUBLIC SCHOOLS", - "distance_away_km": 0.0, - "statezip5zip4": "il606227194", - "zip5zip4": "606227194", - "FilteredFirstCut.vb.vf_reg_cass_state": "IL", - "FilteredFirstCut.vb.vf_reg_cass_zip": "60622", - "FilteredFirstCut.vb.reg_cass_zip4": "7194", - "FilteredFirstCut.vb.vf_precinct_id": "36", - "FilteredFirstCut.vb.vf_precinct_name": "CITY OF CHICAGO W-26 P-36", - "FilteredFirstCut.vb.vf_ward": "26", - "FilteredFirstCut.vb.vf_township": "", - "FilteredFirstCut.vb.vf_county_code": "031", - "FilteredFirstCut.vb.vf_county_name": "CITY OF CHICAGO", - "FilteredFirstCut.vb.vf_sd": "002", - "FilteredFirstCut.vb.vf_hd": "004", - "FilteredFirstCut.vb.vf_cd": "007", - "FilteredFirstCut.vb.vf_city_council": "", - "FilteredFirstCut.vb.vf_municipal_district": "CHICAGO", - "FilteredFirstCut.vb.vf_county_council": "08", - "FilteredFirstCut.vb.vf_judicial_district": "CHICAGO", - "FilteredFirstCut.vb.vf_school_district": "CHICAGO PUBLIC SCHOOLS", - "voters_in_district": "2", - "z9_latitude": 41.898369, - "z9_longitude": -87.694382, + 'vb.vf_reg_cass_state': 'IL', 'vb.vf_reg_cass_zip': '60622', + 'vb.reg_cass_zip4': '7194', 'vb.vf_precinct_id': '36', 'vb.vf_precinct_name': + 'CITY OF CHICAGO W-26 P-36', 'vb.vf_ward': '26', 'vb.vf_township': '', + 'vb.vf_county_code': '031', 'vb.vf_county_name': 'CITY OF CHICAGO', + 'vb.vf_sd': '002', 'vb.vf_hd': '004', 'vb.vf_cd': '007', 'vb.vf_city_council': + '', 'vb.vf_municipal_district': 'CHICAGO', 'vb.vf_county_council': '08', + 'vb.vf_judicial_district': 'CHICAGO', 'vb.vf_school_district': 'CHICAGO PUBLIC SCHOOLS', + 'distance_away_km': 0.0, 'statezip5zip4': 'il606227194', 'zip5zip4': + '606227194', 'FilteredFirstCut.vb.vf_reg_cass_state': 'IL', + 'FilteredFirstCut.vb.vf_reg_cass_zip': '60622', + 'FilteredFirstCut.vb.reg_cass_zip4': '7194', + 'FilteredFirstCut.vb.vf_precinct_id': '36', + 'FilteredFirstCut.vb.vf_precinct_name': 'CITY OF CHICAGO W-26 P-36', + 'FilteredFirstCut.vb.vf_ward': '26', 'FilteredFirstCut.vb.vf_township': '', + 'FilteredFirstCut.vb.vf_county_code': '031', + 'FilteredFirstCut.vb.vf_county_name': 'CITY OF CHICAGO', + 'FilteredFirstCut.vb.vf_sd': '002', 'FilteredFirstCut.vb.vf_hd': '004', + 'FilteredFirstCut.vb.vf_cd': '007', 
'FilteredFirstCut.vb.vf_city_council': '', + 'FilteredFirstCut.vb.vf_municipal_district': 'CHICAGO', + 'FilteredFirstCut.vb.vf_county_council': '08', + 'FilteredFirstCut.vb.vf_judicial_district': 'CHICAGO', + 'FilteredFirstCut.vb.vf_school_district': 'CHICAGO PUBLIC SCHOOLS', + 'voters_in_district': '2', 'z9_latitude': 41.898369, 'z9_longitude': -87.694382, } district_point = { - "input": { - "search_type": "point", - "latitude": "41.898369", - "longitude": "-87.694382", - }, - "error": None, - "match_found": True, - "match_data": district_match, - "gateway_id": "782cbcf0-039c- 11e9-a8b0-37650c25b496", - "function_id": "782dce4b-039c-11e9-8056-390eede430de", + 'input': { + 'search_type': 'point', 'latitude': '41.898369', + 'longitude': '-87.694382'}, + 'error': None, 'match_found': True, + 'match_data': district_match, + 'gateway_id': '782cbcf0-039c- 11e9-a8b0-37650c25b496', + 'function_id': '782dce4b-039c-11e9-8056-390eede430de' } district_zip = { - "input": {"search_type": "zip", "zip5": "60622", "zip4": "7194"}, - "error": None, - "match_found": True, - "match_data": { - "vb.vf_reg_cass_state": "IL", - "vb.vf_reg_cass_zip": "60622", - "vb.reg_cass_zip4": "7194", - "vb.vf_precinct_id": "36", - "vb.vf_precinct_name": "CITY OF CHICAGO W-26 P-36", - "vb.vf_ward": "26", - "vb.vf_township": "", - "vb.vf_county_code": "031", - "vb.vf_county_name": "CITY OF CHICAGO", - "vb.vf_sd": "002", - "vb.vf_hd": "004", - "vb.vf_cd": "007", - "vb.vf_city_council": "", - "vb.vf_municipal_district": "CHICAGO", - "vb.vf_county_council": "08", - "vb.vf_judicial_district": "CHICAGO", - "vb.vf_school_district": "CHICAGO PUBLIC SCHOOLS", - }, - "gateway_id": "fd1e561c-03ad-11e9-a77a-899cbb59a220", - "function_id": "fd1f40fe-03ad-11e9-b9b2-8da4a145abf6", + 'input': {'search_type': 'zip', 'zip5': '60622', 'zip4': '7194'}, + 'error': None, 'match_found': True, + 'match_data': { + 'vb.vf_reg_cass_state': 'IL', + 'vb.vf_reg_cass_zip': '60622', 'vb.reg_cass_zip4': '7194', + 'vb.vf_precinct_id': '36', 'vb.vf_precinct_name': 'CITY OF CHICAGO W-26 P-36', + 'vb.vf_ward': '26', 'vb.vf_township': '', 'vb.vf_county_code': '031', + 'vb.vf_county_name': 'CITY OF CHICAGO', 'vb.vf_sd': '002', 'vb.vf_hd': '004', + 'vb.vf_cd': '007', 'vb.vf_city_council': '', 'vb.vf_municipal_district': 'CHICAGO', + 'vb.vf_county_council': '08', 'vb.vf_judicial_district': 'CHICAGO', + 'vb.vf_school_district': 'CHICAGO PUBLIC SCHOOLS'}, + 'gateway_id': 'fd1e561c-03ad-11e9-a77a-899cbb59a220', + 'function_id': 'fd1f40fe-03ad-11e9-b9b2-8da4a145abf6' } district_expected = [ - "vb.vf_reg_cass_state", - "vb.vf_reg_cass_zip", - "vb.reg_cass_zip4", - "vb.vf_precinct_id", - "vb.vf_precinct_name", - "vb.vf_ward", - "vb.vf_township", - "vb.vf_county_code", - "vb.vf_county_name", - "vb.vf_sd", - "vb.vf_hd", - "vb.vf_cd", - "vb.vf_city_council", - "vb.vf_municipal_district", - "vb.vf_county_council", - "vb.vf_judicial_district", - "vb.vf_school_district", - "distance_away_km", - "statezip5zip4", - "zip5zip4", - "FilteredFirstCut.vb.vf_reg_cass_state", - "FilteredFirstCut.vb.vf_reg_cass_zip", - "FilteredFirstCut.vb.reg_cass_zip4", - "FilteredFirstCut.vb.vf_precinct_id", - "FilteredFirstCut.vb.vf_precinct_name", - "FilteredFirstCut.vb.vf_ward", - "FilteredFirstCut.vb.vf_township", - "FilteredFirstCut.vb.vf_county_code", - "FilteredFirstCut.vb.vf_county_name", - "FilteredFirstCut.vb.vf_sd", - "FilteredFirstCut.vb.vf_hd", - "FilteredFirstCut.vb.vf_cd", - "FilteredFirstCut.vb.vf_city_council", - "FilteredFirstCut.vb.vf_municipal_district", - 
"FilteredFirstCut.vb.vf_county_council", - "FilteredFirstCut.vb.vf_judicial_district", - "FilteredFirstCut.vb.vf_school_district", - "voters_in_district", - "z9_latitude", - "z9_longitude", + 'vb.vf_reg_cass_state', 'vb.vf_reg_cass_zip', 'vb.reg_cass_zip4', + 'vb.vf_precinct_id', 'vb.vf_precinct_name', 'vb.vf_ward', + 'vb.vf_township', 'vb.vf_county_code', 'vb.vf_county_name', + 'vb.vf_sd', 'vb.vf_hd', 'vb.vf_cd', 'vb.vf_city_council', + 'vb.vf_municipal_district', 'vb.vf_county_council', + 'vb.vf_judicial_district', 'vb.vf_school_district', + 'distance_away_km', 'statezip5zip4', 'zip5zip4', + 'FilteredFirstCut.vb.vf_reg_cass_state', + 'FilteredFirstCut.vb.vf_reg_cass_zip', + 'FilteredFirstCut.vb.reg_cass_zip4', + 'FilteredFirstCut.vb.vf_precinct_id', + 'FilteredFirstCut.vb.vf_precinct_name', + 'FilteredFirstCut.vb.vf_ward', + 'FilteredFirstCut.vb.vf_township', + 'FilteredFirstCut.vb.vf_county_code', + 'FilteredFirstCut.vb.vf_county_name', + 'FilteredFirstCut.vb.vf_sd', + 'FilteredFirstCut.vb.vf_hd', 'FilteredFirstCut.vb.vf_cd', + 'FilteredFirstCut.vb.vf_city_council', + 'FilteredFirstCut.vb.vf_municipal_district', + 'FilteredFirstCut.vb.vf_county_council', + 'FilteredFirstCut.vb.vf_judicial_district', + 'FilteredFirstCut.vb.vf_school_district', + 'voters_in_district', 'z9_latitude', + 'z9_longitude' ] address_response = { - "input": {"address": "100 N Main St, Chicago, IL 60622", "search_type": "address"}, - "error": None, - "match_found": True, - "match_data": district_match, - "gateway_id": "febd57bc-03bb-11e9-ad98-592b0545ec68", - "function_id": "febdf462-03bb-11e9-a944-25816baaec7e", -} + 'input': { + 'address': '100 N Main St, Chicago, IL 60622', + 'search_type': 'address'}, + 'error': None, 'match_found': True, 'match_data': district_match, + 'gateway_id': 'febd57bc-03bb-11e9-ad98-592b0545ec68', + 'function_id': 'febdf462-03bb-11e9-a944-25816baaec7e'} address_expected = [ - "vb.vf_reg_cass_state", - "vb.vf_reg_cass_zip", - "vb.reg_cass_zip4", - "vb.vf_precinct_id", - "vb.vf_precinct_name", - "vb.vf_ward", - "vb.vf_township", - "vb.vf_county_code", - "vb.vf_county_name", - "vb.vf_sd", - "vb.vf_hd", - "vb.vf_cd", - "vb.vf_city_council", - "vb.vf_municipal_district", - "vb.vf_county_council", - "vb.vf_judicial_district", - "vb.vf_school_district", - "distance_away_km", - "statezip5zip4", - "zip5zip4", - "FilteredFirstCut.vb.vf_reg_cass_state", - "FilteredFirstCut.vb.vf_reg_cass_zip", - "FilteredFirstCut.vb.reg_cass_zip4", - "FilteredFirstCut.vb.vf_precinct_id", - "FilteredFirstCut.vb.vf_precinct_name", - "FilteredFirstCut.vb.vf_ward", - "FilteredFirstCut.vb.vf_township", - "FilteredFirstCut.vb.vf_county_code", - "FilteredFirstCut.vb.vf_county_name", - "FilteredFirstCut.vb.vf_sd", - "FilteredFirstCut.vb.vf_hd", - "FilteredFirstCut.vb.vf_cd", - "FilteredFirstCut.vb.vf_city_council", - "FilteredFirstCut.vb.vf_municipal_district", - "FilteredFirstCut.vb.vf_county_council", - "FilteredFirstCut.vb.vf_judicial_district", - "FilteredFirstCut.vb.vf_school_district", - "voters_in_district", - "z9_latitude", - "z9_longitude", + 'vb.vf_reg_cass_state', 'vb.vf_reg_cass_zip', 'vb.reg_cass_zip4', 'vb.vf_precinct_id', + 'vb.vf_precinct_name', 'vb.vf_ward', 'vb.vf_township', 'vb.vf_county_code', 'vb.vf_county_name', + 'vb.vf_sd', 'vb.vf_hd', 'vb.vf_cd', 'vb.vf_city_council', 'vb.vf_municipal_district', + 'vb.vf_county_council', 'vb.vf_judicial_district', 'vb.vf_school_district', 'distance_away_km', + 'statezip5zip4', 'zip5zip4', 'FilteredFirstCut.vb.vf_reg_cass_state', + 
'FilteredFirstCut.vb.vf_reg_cass_zip', 'FilteredFirstCut.vb.reg_cass_zip4', + 'FilteredFirstCut.vb.vf_precinct_id', 'FilteredFirstCut.vb.vf_precinct_name', + 'FilteredFirstCut.vb.vf_ward', 'FilteredFirstCut.vb.vf_township', + 'FilteredFirstCut.vb.vf_county_code', 'FilteredFirstCut.vb.vf_county_name', + 'FilteredFirstCut.vb.vf_sd', 'FilteredFirstCut.vb.vf_hd', 'FilteredFirstCut.vb.vf_cd', + 'FilteredFirstCut.vb.vf_city_council', 'FilteredFirstCut.vb.vf_municipal_district', + 'FilteredFirstCut.vb.vf_county_council', 'FilteredFirstCut.vb.vf_judicial_district', + 'FilteredFirstCut.vb.vf_school_district', 'voters_in_district', 'z9_latitude', 'z9_longitude' ] zip_expected = [ - "vb.vf_reg_cass_state", - "vb.vf_reg_cass_zip", - "vb.reg_cass_zip4", - "vb.vf_precinct_id", - "vb.vf_precinct_name", - "vb.vf_ward", - "vb.vf_township", - "vb.vf_county_code", - "vb.vf_county_name", - "vb.vf_sd", - "vb.vf_hd", - "vb.vf_cd", - "vb.vf_city_council", - "vb.vf_municipal_district", - "vb.vf_county_council", - "vb.vf_judicial_district", - "vb.vf_school_district", + 'vb.vf_reg_cass_state', 'vb.vf_reg_cass_zip', 'vb.reg_cass_zip4', 'vb.vf_precinct_id', + 'vb.vf_precinct_name', 'vb.vf_ward', 'vb.vf_township', 'vb.vf_county_code', 'vb.vf_county_name', + 'vb.vf_sd', 'vb.vf_hd', 'vb.vf_cd', 'vb.vf_city_council', 'vb.vf_municipal_district', + 'vb.vf_county_council', 'vb.vf_judicial_district', 'vb.vf_school_district' ] # ### Radius Endpoint ### radius_response = { - "input": { - "first_name": "Billy", - "last_name": "Blanks", - "address": "100 N Main St, Chicago, IL", - "radius_size": "100", - "radius_unit": "miles", - "max_results": "10", - "gender": "a", - "composite_score_min": "1", - "composite_score_max": "100", - "last_name_exact": "True", - "last_name_is_prefix": "False", - "last_name_prefix_length": "10", - }, - "error": None, - "output": [ - { - "similarity_score": 92, - "data_fields": { - "vb.vf_g2014": "Y", - "vb.tsmart_middle_name": "H", - "vb.vf_reg_cass_city": "CHICAGO", - "vb.vf_reg_cass_state": "IL", - "ts.tsmart_midterm_general_turnout_score": "85.5", - "vb.vf_g2016": "Y", - "vb.vf_precinct_name": "CITY OF CHICAGO W-26 P-36", - "vb.voterid": "Q8W8682Y", - "vb.voterbase_phone": "4435705356", - "vb.tsmart_precinct_id": "36", - "vb.tsmart_city": "CHICAGO", - "vb.vf_earliest_registration_date": "20141104", - "vb.vf_registration_date": "20141104", - "vb.vf_reg_cass_zip4": "4455", - "vb.vf_reg_cass_zip": "60622", - "vb.tsmart_precinct_name": "CITY OF CHICAGO W-26 P-36", - "vb.voterbase_gender": "Male", - "vb.voterbase_age": "37", - "vb.tsmart_full_address": "100 N Main St AVE APT 2", - "vb.voterbase_id": "IL-12568678", - "vb.vf_reg_cass_address_full": "100 N Main St AVE # 2", - "vb.tsmart_zip": "60622", - "vb.voterbase_registration_status": "Registered", - "vb.tsmart_state": "IL", - "vb.vf_precinct_id": "36", - "vb.tsmart_partisan_score": "99.6", - "vb.tsmart_name_suffix": "", - "vb.tsmart_last_name": "Blanks", - "vb.tsmart_first_name": "Billy", - "vb.tsmart_zip4": "4455", - }, - "distance_km": "0.0119", - "distance_meters": "11", - "distance_miles": "0.0074", - "distance_feet": "38", - "proximity_score": 100, - "composite_score": 96, - "uniqueness_score": 100, - "confidence_indicator": "Excellent Match", - } - ], - "output_size": 1, - "search_latitude": 41.897826, - "search_longitude": -87.69465, - "gateway_id": "24639609-fb58-11e8-b8af-61c17d74b690", - "function_id": "2464a7ac-fb58-11e8-8fc5-fd9b1d4a95a4", + 'input': { + 'first_name': 'Billy', 'last_name': 'Blanks', + 'address': '100 N Main St, Chicago, 
IL', 'radius_size': '100', + 'radius_unit': 'miles', 'max_results': '10', 'gender': 'a', + 'composite_score_min': '1', 'composite_score_max': '100', + 'last_name_exact': 'True', 'last_name_is_prefix': 'False', + 'last_name_prefix_length': '10'}, + 'error': None, + 'output': [{'similarity_score': 92, 'data_fields': { + 'vb.vf_g2014': 'Y', + 'vb.tsmart_middle_name': 'H', 'vb.vf_reg_cass_city': 'CHICAGO', + 'vb.vf_reg_cass_state': 'IL', 'ts.tsmart_midterm_general_turnout_score': '85.5', + 'vb.vf_g2016': 'Y', 'vb.vf_precinct_name': 'CITY OF CHICAGO W-26 P-36', + 'vb.voterid': 'Q8W8682Y', 'vb.voterbase_phone': '4435705356', + 'vb.tsmart_precinct_id': '36', 'vb.tsmart_city': 'CHICAGO', + 'vb.vf_earliest_registration_date': '20141104', + 'vb.vf_registration_date': '20141104', 'vb.vf_reg_cass_zip4': '4455', + 'vb.vf_reg_cass_zip': '60622', + 'vb.tsmart_precinct_name': 'CITY OF CHICAGO W-26 P-36', + 'vb.voterbase_gender': 'Male', 'vb.voterbase_age': '37', + 'vb.tsmart_full_address': '100 N Main St AVE APT 2', + 'vb.voterbase_id': 'IL-12568678', + 'vb.vf_reg_cass_address_full': '100 N Main St AVE # 2', + 'vb.tsmart_zip': '60622', 'vb.voterbase_registration_status': 'Registered', + 'vb.tsmart_state': 'IL', 'vb.vf_precinct_id': '36', + 'vb.tsmart_partisan_score': '99.6', 'vb.tsmart_name_suffix': '', + 'vb.tsmart_last_name': 'Blanks', 'vb.tsmart_first_name': 'Billy', + 'vb.tsmart_zip4': '4455'}, 'distance_km': '0.0119', 'distance_meters': '11', + 'distance_miles': '0.0074', 'distance_feet': '38', 'proximity_score': 100, + 'composite_score': 96, 'uniqueness_score': 100, + 'confidence_indicator': 'Excellent Match'}], + 'output_size': 1, + 'search_latitude': 41.897826, 'search_longitude': -87.69465, + 'gateway_id': '24639609-fb58-11e8-b8af-61c17d74b690', + 'function_id': '2464a7ac-fb58-11e8-8fc5-fd9b1d4a95a4', } radius_expected = [ - "similarity_score", - "distance_km", - "distance_meters", - "distance_miles", - "distance_feet", - "proximity_score", - "composite_score", - "uniqueness_score", - "confidence_indicator", - "ts.tsmart_midterm_general_turnout_score", - "vb.tsmart_city", - "vb.tsmart_first_name", - "vb.tsmart_full_address", - "vb.tsmart_last_name", - "vb.tsmart_middle_name", - "vb.tsmart_name_suffix", - "vb.tsmart_partisan_score", - "vb.tsmart_precinct_id", - "vb.tsmart_precinct_name", - "vb.tsmart_state", - "vb.tsmart_zip", - "vb.tsmart_zip4", - "vb.vf_earliest_registration_date", - "vb.vf_g2014", - "vb.vf_g2016", - "vb.vf_precinct_id", - "vb.vf_precinct_name", - "vb.vf_reg_cass_address_full", - "vb.vf_reg_cass_city", - "vb.vf_reg_cass_state", - "vb.vf_reg_cass_zip", - "vb.vf_reg_cass_zip4", - "vb.vf_registration_date", - "vb.voterbase_age", - "vb.voterbase_gender", - "vb.voterbase_id", - "vb.voterbase_phone", - "vb.voterbase_registration_status", - "vb.voterid", + 'similarity_score', 'distance_km', 'distance_meters', 'distance_miles', + 'distance_feet', 'proximity_score', 'composite_score', 'uniqueness_score', + 'confidence_indicator', 'ts.tsmart_midterm_general_turnout_score', + 'vb.tsmart_city', 'vb.tsmart_first_name', 'vb.tsmart_full_address', + 'vb.tsmart_last_name', 'vb.tsmart_middle_name', 'vb.tsmart_name_suffix', + 'vb.tsmart_partisan_score', 'vb.tsmart_precinct_id', 'vb.tsmart_precinct_name', + 'vb.tsmart_state', 'vb.tsmart_zip', 'vb.tsmart_zip4', + 'vb.vf_earliest_registration_date', 'vb.vf_g2014', 'vb.vf_g2016', + 'vb.vf_precinct_id', 'vb.vf_precinct_name', 'vb.vf_reg_cass_address_full', + 'vb.vf_reg_cass_city', 'vb.vf_reg_cass_state', 'vb.vf_reg_cass_zip', + 'vb.vf_reg_cass_zip4', 
'vb.vf_registration_date', 'vb.voterbase_age', + 'vb.voterbase_gender', 'vb.voterbase_id', 'vb.voterbase_phone', + 'vb.voterbase_registration_status', 'vb.voterid' ] phone_response = { - "input": {"phones": "4435705356"}, - "error": None, - "result": [ - { - "vb.voterid": "Q8W8652Y", - "vb.tsmart_full_address": "908 N MAIN AVE APT 2", - "vb.voterbase_age": "37", - "vb.tsmart_first_name": "BILLY", - "vb.voterbase_phone": "4435705355", - "vb.vf_g2014": "Y", - "vb.tsmart_last_name": "BLANKS", - "vb.tsmart_zip": "50622", - "vb.tsmart_state": "IL", - "vb.voterbase_gender": "Male", - "vb.tsmart_city": "CHICAGO", - "vb.tsmart_partisan_score": "99.6", - "ts.tsmart_midterm_general_turnout_score": "85.5", - "vb.voterbase_id": "IL-12568678", - "vb.voterbase_registration_status": "Registered", - "vb.vf_g2016": "Y", - "vb.tsmart_middle_name": "H", - "vb.tsmart_name_suffix": "", - } - ], - "gateway_id": "17d19715-062c-11e9-aedb-3d74ea11c29c", - "function_id": "17d2818d-062c-11e9-a4a2-edee7fb1f969", + 'input': {'phones': '4435705356'}, + 'error': None, 'result': [{ + 'vb.voterid': 'Q8W8652Y', + 'vb.tsmart_full_address': '908 N MAIN AVE APT 2', + 'vb.voterbase_age': '37', + 'vb.tsmart_first_name': 'BILLY', + 'vb.voterbase_phone': '4435705355', + 'vb.vf_g2014': 'Y', + 'vb.tsmart_last_name': 'BLANKS', + 'vb.tsmart_zip': '50622', + 'vb.tsmart_state': 'IL', + 'vb.voterbase_gender': 'Male', + 'vb.tsmart_city': 'CHICAGO', + 'vb.tsmart_partisan_score': '99.6', + 'ts.tsmart_midterm_general_turnout_score': '85.5', + 'vb.voterbase_id': 'IL-12568678', + 'vb.voterbase_registration_status': 'Registered', + 'vb.vf_g2016': 'Y', + 'vb.tsmart_middle_name': 'H', + 'vb.tsmart_name_suffix': ''}], + 'gateway_id': '17d19715-062c-11e9-aedb-3d74ea11c29c', + 'function_id': '17d2818d-062c-11e9-a4a2-edee7fb1f969' } phone_expected = [ - "vb.voterid", - "vb.tsmart_full_address", - "vb.voterbase_age", - "vb.tsmart_first_name", - "vb.voterbase_phone", - "vb.vf_g2014", - "vb.tsmart_last_name", - "vb.tsmart_zip", - "vb.tsmart_state", - "vb.voterbase_gender", - "vb.tsmart_city", - "vb.tsmart_partisan_score", - "ts.tsmart_midterm_general_turnout_score", - "vb.voterbase_id", - "vb.voterbase_registration_status", - "vb.vf_g2016", - "vb.tsmart_middle_name", - "vb.tsmart_name_suffix", + 'vb.voterid', 'vb.tsmart_full_address', 'vb.voterbase_age', 'vb.tsmart_first_name', + 'vb.voterbase_phone', 'vb.vf_g2014', 'vb.tsmart_last_name', 'vb.tsmart_zip', + 'vb.tsmart_state', 'vb.voterbase_gender', 'vb.tsmart_city', 'vb.tsmart_partisan_score', + 'ts.tsmart_midterm_general_turnout_score', 'vb.voterbase_id', + 'vb.voterbase_registration_status', 'vb.vf_g2016', 'vb.tsmart_middle_name', + 'vb.tsmart_name_suffix' ] diff --git a/test/test_actblue/test_actblue.py b/test/test_actblue/test_actblue.py index 2132ed9878..6f0eaae21a 100644 --- a/test/test_actblue/test_actblue.py +++ b/test/test_actblue/test_actblue.py @@ -5,52 +5,51 @@ from unittest.mock import MagicMock -TEST_CLIENT_UUID = "someuuid" -TEST_CLIENT_SECRET = "somesecret" +TEST_CLIENT_UUID = 'someuuid' +TEST_CLIENT_SECRET = 'somesecret' -TEST_ID = "12345" -TEST_URI = "https://faketestingurl.com/example" +TEST_ID = '12345' +TEST_URI = 'https://faketestingurl.com/example' -TEST_CSV_TYPE = "refunded_contributions" -TEST_DATE_RANGE_START = "2017-07-07" -TEST_DATE_RANGE_END = "2017-08-07" +TEST_CSV_TYPE = 'refunded_contributions' +TEST_DATE_RANGE_START = '2017-07-07' +TEST_DATE_RANGE_END = '2017-08-07' -TEST_POST_RESPONSE = {"id": TEST_ID} +TEST_POST_RESPONSE = { + "id": TEST_ID +} TEST_DOWNLOAD_URL = 
"https://www.example.com/example.csv" TEST_GET_RESPONSE = { "id": TEST_ID, "download_url": TEST_DOWNLOAD_URL, - "status": "complete", + "status": "complete" } class TestActBlue(unittest.TestCase): + @requests_mock.Mocker() def setUp(self, m): self.ab = ActBlue(TEST_CLIENT_UUID, TEST_CLIENT_SECRET, TEST_URI) self.from_csv = Table.from_csv - test_csv_data = Table.from_csv_string( - open("test/test_actblue/test_csv_data.csv").read() - ) - Table.from_csv = MagicMock(name="mocked from_csv", return_value=test_csv_data) + test_csv_data = Table.from_csv_string(open('test/test_actblue/test_csv_data.csv').read()) + Table.from_csv = MagicMock(name='mocked from_csv', return_value=test_csv_data) def tearDown(self): Table.from_csv = self.from_csv @requests_mock.Mocker() def test_successful_post_request(self, m): - m.post(f"{TEST_URI}/csvs", json=TEST_POST_RESPONSE) + m.post(f'{TEST_URI}/csvs', json=TEST_POST_RESPONSE) - response = self.ab.post_request( - TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END - ) - assert response["id"] == TEST_POST_RESPONSE["id"] + response = self.ab.post_request(TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END) + assert response['id'] == TEST_POST_RESPONSE['id'] @requests_mock.Mocker() def test_successful_get_download_url(self, m): - m.get(f"{TEST_URI}/csvs/{TEST_ID}", json=TEST_GET_RESPONSE) + m.get(f'{TEST_URI}/csvs/{TEST_ID}', json=TEST_GET_RESPONSE) assert self.ab.get_download_url(csv_id=TEST_ID) == TEST_DOWNLOAD_URL @@ -59,25 +58,18 @@ def test_successful_poll_for_download_url(self, m): mocked_get_response_no_download_url = { "id": TEST_ID, "download_url": None, - "status": "in_progress", + "status": "in_progress" } - m.get( - f"{TEST_URI}/csvs/{TEST_ID}", - [ - {"json": mocked_get_response_no_download_url}, - {"json": TEST_GET_RESPONSE}, - ], - ) + m.get(f'{TEST_URI}/csvs/{TEST_ID}', [{'json': mocked_get_response_no_download_url}, + {'json': TEST_GET_RESPONSE}]) assert self.ab.poll_for_download_url(csv_id=TEST_ID) == TEST_DOWNLOAD_URL @requests_mock.Mocker() def test_successful_get_contributions(self, m): - m.post(f"{TEST_URI}/csvs", json=TEST_POST_RESPONSE) - m.get(f"{TEST_URI}/csvs/{TEST_ID}", json=TEST_GET_RESPONSE) + m.post(f'{TEST_URI}/csvs', json=TEST_POST_RESPONSE) + m.get(f'{TEST_URI}/csvs/{TEST_ID}', json=TEST_GET_RESPONSE) - table = self.ab.get_contributions( - TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END - ) + table = self.ab.get_contributions(TEST_CSV_TYPE, TEST_DATE_RANGE_START, TEST_DATE_RANGE_END) assert test_columns_data.expected_table_columns == table.columns diff --git a/test/test_actblue/test_columns_data.py b/test/test_actblue/test_columns_data.py index 28dba65e12..4e464b41ca 100644 --- a/test/test_actblue/test_columns_data.py +++ b/test/test_actblue/test_columns_data.py @@ -89,5 +89,5 @@ "Shipping Country", "Weekly Recurring Amount", "Smart Boost Amount", - "Smart Boost Shown", + "Smart Boost Shown" ] diff --git a/test/test_action_builder/test_action_builder.py b/test/test_action_builder/test_action_builder.py deleted file mode 100644 index 57fcdfcb45..0000000000 --- a/test/test_action_builder/test_action_builder.py +++ /dev/null @@ -1,398 +0,0 @@ -import unittest -import requests_mock -import json -from parsons import Table, ActionBuilder -from test.utils import assert_matching_tables - - -class TestActionBuilder(unittest.TestCase): - @requests_mock.Mocker() - def setUp(self, m): - self.subdomain = "fake_subdomain" - self.campaign = "fake-campaign" - self.api_url = 
"https://{}.actionbuilder.org/api/rest/v1/campaigns/{}".format( - self.subdomain, self.campaign - ) - self.api_key = "fake_key" - - self.bldr = ActionBuilder( - api_token=self.api_key, subdomain=self.subdomain, campaign=self.campaign - ) - - self.fake_datetime = "2023-05-19T00:00:00.000Z" - self.fake_date = "2023-05-19" - - self.fake_tag_1 = "Fake Tag 1" - self.fake_tag_2 = "Fake Tag 2" - self.fake_tag_3 = "Fake Tag 3" - self.fake_tag_4 = "Fake Tag 3" - - self.fake_field_1 = "Fake Field 1" - self.fake_section = "Fake Section 1" - - self.fake_tags_list_1 = { - "per_page": 2, - "page": 1, - "total_pages": 9, - "_embedded": { - "osdi:tags": [ - { - "origin_system": "Action Builder", - "identifiers": ["action_builder:fake-action-builder-id-1"], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "name": self.fake_tag_1, - "action_builder:section": self.fake_section, - "action_builder:field": self.fake_field_1, - "action_builder:field_type": "standard", - "action_builder:locked": False, - "action_builder:allow_multiple_responses": False, - }, - { - "origin_system": "Action Builder", - "identifiers": ["action_builder:fake-action-builder-id-2"], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "name": self.fake_tag_2, - "action_builder:section": self.fake_section, - "action_builder:field": self.fake_field_1, - "action_builder:field_type": "standard", - "action_builder:locked": False, - "action_builder:allow_multiple_responses": False, - }, - ] - }, - } - - self.fake_tag_name_search_result = { - "per_page": 1, - "page": 1, - "total_pages": 1, - "_embedded": { - "osdi:tags": [ - { - "origin_system": "Action Builder", - "identifiers": ["action_builder:fake-action-builder-id-1"], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "name": self.fake_tag_1, - "action_builder:section": self.fake_section, - "action_builder:field": self.fake_field_1, - "action_builder:field_type": "standard", - "action_builder:locked": False, - "action_builder:allow_multiple_responses": False, - } - ] - }, - } - - self.fake_tags_list_2 = { - "per_page": 2, - "page": 2, - "total_pages": 9, - "_embedded": { - "osdi:tags": [ - { - "origin_system": "Action Builder", - "identifiers": ["action_builder:fake-action-builder-id-3"], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "name": self.fake_tag_3, - "action_builder:section": self.fake_section, - "action_builder:field": self.fake_field_1, - "action_builder:field_type": "standard", - "action_builder:locked": False, - "action_builder:allow_multiple_responses": False, - }, - { - "origin_system": "Action Builder", - "identifiers": ["action_builder:fake-action-builder-id-4"], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "name": self.fake_tag_4, - "action_builder:section": self.fake_section, - "action_builder:field": self.fake_field_1, - "action_builder:field_type": "standard", - "action_builder:locked": False, - "action_builder:allow_multiple_responses": False, - }, - ] - }, - } - - self.fake_tags_list = ( - self.fake_tags_list_1["_embedded"]["osdi:tags"] - + self.fake_tags_list_2["_embedded"]["osdi:tags"] - ) - - self.fake_field_values = { - "Fake Field 2": "Fake Tag 5", - self.fake_field_1: self.fake_tag_4, - } - - self.fake_tagging = [ - { - "action_builder:name": self.fake_tag_4, - "action_builder:field": self.fake_field_1, - "action_builder:section": self.fake_section, - }, - { - "action_builder:name": "Fake Tag 5", - 
"action_builder:field": "Fake Field 2", - "action_builder:section": self.fake_section, - }, - ] - - self.fake_entity_id = "fake-entity-id-1" - - self.fake_upserted_response = { - "origin_system": "Action Builder", - "identifiers": [f"action_builder:{self.fake_entity_id}"], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "action_builder:entity_type": "Person", - "given_name": "Fakey", - "family_name": "McFakerson", - "preferred_language": "en", - "email_addresses": [ - { - "action_builder:identifier": "action_builder:fake-email-id-1", - "address": "fakey@mcfakerson.com", - "address_type": "Work", - "status": "unsubscribed", - "source": "api", - } - ], - } - - self.fake_upsert_person = { - "person": { - "identifiers": [f"action_builder:{self.fake_entity_id}"], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "action_builder:entity_type": "Person", - "given_name": "Fakey", - "family_name": "McFakerson", - "preferred_language": "en", - "email_addresses": [ - { - "action_builder:identifier": "action_builder:fake-email-id-1", - "address": "fakey@mcfakerson.com", - "address_type": "Work", - "status": "unsubscribed", - } - ], - } - } - - self.fake_insert_person = { - "entity_type": "Person", - "data": { - "person": { - "given_name": "Fakey", - "family_name": "McFakerson", - "email_addresses": [ - {"address": "fakey@mcfakerson.com", "status": "unsubscribed"} - ], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - } - }, - } - - self.fake_update_person = { - k: v for k, v in self.fake_insert_person.items() if k != "entity_type" - } - self.fake_update_person["identifier"] = [ - f"action_builder:{self.fake_entity_id}" - ] - - self.fake_tag_id = "fake_tag_id" - self.fake_tagging_id = "fake_tagging_id" - self.fake_remove_tag_resp = { - "message": "Tag has been removed from Taggable Logbook" - } - - self.fake_connection = {"person_id": "fake-entity-id-2"} - - @requests_mock.Mocker() - def test_get_page(self, m): - m.get( - f"{self.api_url}/tags?page=2&per_page=2", - text=json.dumps(self.fake_tags_list_2), - ) - self.assertEqual( - self.bldr._get_page(self.campaign, "tags", 2, 2), self.fake_tags_list_2 - ) - - @requests_mock.Mocker() - def test_get_all_records(self, m): - m.get( - f"{self.api_url}/tags?page=1&per_page=25", - text=json.dumps(self.fake_tags_list_1), - ) - m.get( - f"{self.api_url}/tags?page=2&per_page=25", - text=json.dumps(self.fake_tags_list_2), - ) - m.get( - f"{self.api_url}/tags?page=3&per_page=25", - text=json.dumps({"_embedded": {"osdi:tags": []}}), - ) - assert_matching_tables( - self.bldr._get_all_records(self.campaign, "tags"), - Table(self.fake_tags_list), - ) - - @requests_mock.Mocker() - def test_get_campaign_tags(self, m): - m.get( - f"{self.api_url}/tags?page=1&per_page=25", - text=json.dumps(self.fake_tags_list_1), - ) - m.get( - f"{self.api_url}/tags?page=2&per_page=25", - text=json.dumps(self.fake_tags_list_2), - ) - m.get( - f"{self.api_url}/tags?page=3&per_page=25", - text=json.dumps({"_embedded": {"osdi:tags": []}}), - ) - assert_matching_tables( - self.bldr.get_campaign_tags(), Table(self.fake_tags_list) - ) - - @requests_mock.Mocker() - def test_get_tag_by_name(self, m): - m.get( - f"{self.api_url}/tags?filter=name eq '{self.fake_tag_1}'", - text=json.dumps(self.fake_tag_name_search_result), - ) - m.get( - f"{self.api_url}/tags?page=2&per_page=25&filter=name eq '{self.fake_tag_1}'", - text=json.dumps({"_embedded": {"osdi:tags": []}}), - ) - assert_matching_tables( - 
self.bldr.get_tag_by_name(self.fake_tag_1), - Table([self.fake_tags_list_1["_embedded"]["osdi:tags"][0]]), - ) - - def prepare_dict_key_intersection(self, dict1, dict2): - # Internal method to compare a reference dict to a new incoming one, keeping only common - # keys whose values are not lists (i.e. nested). - - common_keys = { - key - for key, value in dict1.items() - if key in dict2 and not isinstance(value, list) - } - - dict1_comp = {key: value for key, value in dict1.items() if key in common_keys} - - dict2_comp = {key: value for key, value in dict2.items() if key in common_keys} - - return dict1_comp, dict2_comp - - @requests_mock.Mocker() - def test_upsert_entity(self, m): - m.post(f"{self.api_url}/people", text=json.dumps(self.fake_upserted_response)) - - # Flatten and remove items added for spreadable arguments - upsert_person = self.fake_upsert_person["person"] - upsert_response = self.bldr._upsert_entity( - self.fake_upsert_person, self.campaign - ) - - person_comp, upsert_response_comp = self.prepare_dict_key_intersection( - upsert_person, upsert_response - ) - - upsert_email = upsert_person["email_addresses"][0] - response_email = upsert_response["email_addresses"][0] - - email_comp, response_email_comp = self.prepare_dict_key_intersection( - upsert_email, response_email - ) - - self.assertEqual(person_comp, upsert_response_comp) - self.assertEqual(email_comp, response_email_comp) - - @requests_mock.Mocker() - def test_insert_entity_record(self, m): - m.post(f"{self.api_url}/people", text=json.dumps(self.fake_upserted_response)) - - # Flatten and remove items added for spreadable arguments - insert_person = { - **{k: v for k, v in self.fake_insert_person.items() if k != "data"}, - **self.fake_insert_person["data"]["person"], - } - insert_response = self.bldr.insert_entity_record(**self.fake_insert_person) - - person_comp, insert_response_comp = self.prepare_dict_key_intersection( - insert_person, insert_response - ) - - self.assertEqual(person_comp, insert_response_comp) - - @requests_mock.Mocker() - def test_update_entity_record(self, m): - m.post(f"{self.api_url}/people", text=json.dumps(self.fake_upserted_response)) - - # Flatten and remove items added for spreadable arguments - update_person = { - **{k: v for k, v in self.fake_update_person.items() if k != "data"}, - **self.fake_update_person["data"]["person"], - } - update_response = self.bldr.update_entity_record(**self.fake_update_person) - - person_comp, update_response_comp = self.prepare_dict_key_intersection( - update_person, update_response - ) - - self.assertEqual(person_comp, update_response_comp) - - def tagging_callback(self, request, context): - # Internal method for returning the constructed tag data to test - - post_data = request.json() - tagging_data = post_data["add_tags"] - - # Force the sort to allow for predictable comparison - return sorted(tagging_data, key=lambda k: k["action_builder:name"]) - - @requests_mock.Mocker() - def test_add_section_field_values_to_record(self, m): - m.post(f"{self.api_url}/people", json=self.tagging_callback) - add_tags_response = self.bldr.add_section_field_values_to_record( - self.fake_entity_id, self.fake_section, self.fake_field_values - ) - self.assertEqual(add_tags_response, self.fake_tagging) - - @requests_mock.Mocker() - def test_remove_tagging(self, m): - m.delete( - f"{self.api_url}/tags/{self.fake_tag_id}/taggings/{self.fake_tagging_id}", - json=self.fake_remove_tag_resp, - ) - remove_tag_resp = self.bldr.remove_tagging( - tag_id=self.fake_tag_id, 
tagging_id=self.fake_tagging_id - ) - self.assertEqual(remove_tag_resp, self.fake_remove_tag_resp) - - def connect_callback(self, request, context): - # Internal method for returning constructed connection data to test - - post_data = request.json() - connection_data = post_data["connection"] - return connection_data - - @requests_mock.Mocker() - def test_upsert_connection(self, m): - m.post( - f"{self.api_url}/people/{self.fake_entity_id}/connections", - json=self.connect_callback, - ) - connect_response = self.bldr.upsert_connection( - [self.fake_entity_id, "fake-entity-id-2"] - ) - self.assertEqual(connect_response, self.fake_connection) diff --git a/test/test_action_kit.py b/test/test_action_kit.py index 93dd20e96e..c2ce6b08c3 100644 --- a/test/test_action_kit.py +++ b/test/test_action_kit.py @@ -2,21 +2,25 @@ import os import unittest from unittest import mock -from parsons import ActionKit, Table +from parsons.action_kit.action_kit import ActionKit +from parsons.etl.table import Table from test.utils import assert_matching_tables ENV_PARAMETERS = { - "ACTION_KIT_DOMAIN": "env_domain", - "ACTION_KIT_USERNAME": "env_username", - "ACTION_KIT_PASSWORD": "env_password", + 'ACTION_KIT_DOMAIN': 'env_domain', + 'ACTION_KIT_USERNAME': 'env_username', + 'ACTION_KIT_PASSWORD': 'env_password' } class TestActionKit(unittest.TestCase): + def setUp(self): self.actionkit = ActionKit( - domain="domain.actionkit.com", username="user", password="password" + domain='domain.actionkit.com', + username='user', + password='password' ) self.actionkit.conn = mock.MagicMock() @@ -24,47 +28,36 @@ def tearDown(self): pass @mock.patch.dict(os.environ, ENV_PARAMETERS) - def test_from_environ(self): + def test_from_envrion(self): actionkit = ActionKit() - self.assertEqual(actionkit.domain, "env_domain") - self.assertEqual(actionkit.username, "env_username") - self.assertEqual(actionkit.password, "env_password") + self.assertEqual(actionkit.domain, 'env_domain') + self.assertEqual(actionkit.username, 'env_username') + self.assertEqual(actionkit.password, 'env_password') def test_base_endpoint(self): # Test the endpoint - url = self.actionkit._base_endpoint("user") - self.assertEqual(url, "https://domain.actionkit.com/rest/v1/user/") - - url = self.actionkit._base_endpoint("user", 1234) - self.assertEqual(url, "https://domain.actionkit.com/rest/v1/user/1234/") - - url = self.actionkit._base_endpoint("user", "1234") - self.assertEqual(url, "https://domain.actionkit.com/rest/v1/user/1234/") - - def test_delete_actionfield(self): - # Test delete actionfield + url = self.actionkit._base_endpoint('user') + self.assertEqual(url, 'https://domain.actionkit.com/rest/v1/user/') - # Mock resp and status code - resp_mock = mock.MagicMock() - type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=204) - self.actionkit.conn = resp_mock + url = self.actionkit._base_endpoint('user', 1234) + self.assertEqual(url, 'https://domain.actionkit.com/rest/v1/user/1234/') - self.actionkit.delete_actionfield(123) - self.actionkit.conn.delete.assert_called_with( - "https://domain.actionkit.com/rest/v1/actionfield/123/", - ) + url = self.actionkit._base_endpoint('user', '1234') + self.assertEqual(url, 'https://domain.actionkit.com/rest/v1/user/1234/') def test_get_user(self): # Test get user self.actionkit.get_user(123) self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/user/123/", params=None + 'https://domain.actionkit.com/rest/v1/user/123/', + params=None ) def test_get_user_fields(self): 
self.actionkit.get_user_fields() self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/user/schema/", params=None + 'https://domain.actionkit.com/rest/v1/user/schema/', + params=None ) def test_create_user(self): @@ -75,10 +68,10 @@ def test_create_user(self): type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) self.actionkit.conn = resp_mock - self.actionkit.create_user(email="test") + self.actionkit.create_user(email='test') self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/user/", - data=json.dumps({"email": "test"}), + 'https://domain.actionkit.com/rest/v1/user/', + data=json.dumps({'email': 'test'}) ) def test_update_user(self): @@ -89,10 +82,10 @@ def test_update_user(self): type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202) self.actionkit.conn = resp_mock - self.actionkit.update_user(123, last_name="new name") + self.actionkit.update_user(123, last_name='new name') self.actionkit.conn.patch.assert_called_with( - "https://domain.actionkit.com/rest/v1/user/123/", - data=json.dumps({"last_name": "new name"}), + 'https://domain.actionkit.com/rest/v1/user/123/', + data=json.dumps({'last_name': 'new name'}) ) def test_update_event(self): @@ -102,51 +95,10 @@ def test_update_event(self): resp_mock = mock.MagicMock() type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202) self.actionkit.conn = resp_mock - self.actionkit.update_event(123, is_approved="test") + self.actionkit.update_event(123, is_approved='test') self.actionkit.conn.patch.assert_called_with( - "https://domain.actionkit.com/rest/v1/event/123/", - data=json.dumps({"is_approved": "test"}), - ) - - def test_get_blackholed_email(self): - # Test get blackholed email - resp_mock = mock.MagicMock() - type(resp_mock.get()).status_code = mock.PropertyMock(return_value=201) - type(resp_mock.get()).json = lambda x: {"meta": {"next": ""}, "objects": []} - self.actionkit.conn = resp_mock - - self.actionkit.get_blackholed_email("test") - self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/blackholedemail/", - params={"email": "test", "_limit": 100}, - ) - - def test_blackhole_email(self): - # Test blackhole email - - # Mock resp and status code - resp_mock = mock.MagicMock() - type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) - self.actionkit.conn = resp_mock - - self.actionkit.blackhole_email(email="test") - self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/blackholedemail/", - data=json.dumps({"email": "test"}), - ) - - def test_delete_user_data(self): - # Test delete user data - - # Mock resp and status code - resp_mock = mock.MagicMock() - type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) - self.actionkit.conn = resp_mock - - self.actionkit.delete_user_data(email="test") - self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/eraser/", - data=json.dumps({"email": "test"}), + 'https://domain.actionkit.com/rest/v1/event/123/', + data=json.dumps({'is_approved': 'test'}) ) def test_delete_user(self): @@ -159,14 +111,15 @@ def test_delete_user(self): self.actionkit.delete_user(123) self.actionkit.conn.delete.assert_called_with( - "https://domain.actionkit.com/rest/v1/user/123/", + 'https://domain.actionkit.com/rest/v1/user/123/', ) def test_get_campaign(self): # Test get campaign self.actionkit.get_campaign(123) self.actionkit.conn.get.assert_called_with( - 
"https://domain.actionkit.com/rest/v1/campaign/123/", params=None + 'https://domain.actionkit.com/rest/v1/campaign/123/', + params=None ) def test_create_campaign(self): @@ -177,37 +130,29 @@ def test_create_campaign(self): type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201) self.actionkit.conn = resp_mock - self.actionkit.create_campaign(name="new_campaign", field="field") + self.actionkit.create_campaign(name='new_campaign', field='field') self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/campaign/", - data=json.dumps({"name": "new_campaign", "field": "field"}), + 'https://domain.actionkit.com/rest/v1/campaign/', + data=json.dumps({ + 'name': 'new_campaign', + 'field': 'field' + }) ) def test_get_event(self): # Test get event self.actionkit.get_event(1) self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/event/1/", params=None - ) - - def test_get_events(self): - # Test get events - resp_mock = mock.MagicMock() - type(resp_mock.get()).status_code = mock.PropertyMock(return_value=201) - type(resp_mock.get()).json = lambda x: {"meta": {"next": ""}, "objects": []} - self.actionkit.conn = resp_mock - - self.actionkit.get_events(100, order_by="created_at") - self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/event/", - params={"order_by": "created_at", "_limit": 100}, + 'https://domain.actionkit.com/rest/v1/event/1/', + params=None ) def test_get_event_create_page(self): # Test get event create page self.actionkit.get_event_create_page(123) self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventcreatepage/123/", params=None + 'https://domain.actionkit.com/rest/v1/eventcreatepage/123/', + params=None ) def test_create_event_create_page(self): @@ -219,24 +164,25 @@ def test_create_event_create_page(self): self.actionkit.conn = resp_mock self.actionkit.create_event_create_page( - name="new_page", campaign_id="123", title="title" + name='new_page', + campaign_id='123', + title='title' ) self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventcreatepage/", - data=json.dumps( - { - "campaign": "/rest/v1/campaign/123/", - "name": "new_page", - "title": "title", - } - ), + 'https://domain.actionkit.com/rest/v1/eventcreatepage/', + data=json.dumps({ + 'campaign': '/rest/v1/campaign/123/', + 'name': 'new_page', + 'title': 'title' + }) ) def test_get_event_create_form(self): # Test get event create form self.actionkit.get_event_create_form(123) self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventcreateform/123/", params=None + 'https://domain.actionkit.com/rest/v1/eventcreateform/123/', + params=None ) def test_create_event_create_form(self): @@ -248,20 +194,23 @@ def test_create_event_create_form(self): self.actionkit.conn = resp_mock self.actionkit.create_event_create_form( - page_id="123", thank_you_text="thank you" + page_id='123', + thank_you_text='thank you' ) self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventcreateform/", - data=json.dumps( - {"page": "/rest/v1/eventcreatepage/123/", "thank_you_text": "thank you"} - ), + 'https://domain.actionkit.com/rest/v1/eventcreateform/', + data=json.dumps({ + 'page': '/rest/v1/eventcreatepage/123/', + 'thank_you_text': 'thank you' + }) ) def test_get_event_signup_page(self): # Test get event signup page self.actionkit.get_event_signup_page(123) self.actionkit.conn.get.assert_called_with( - 
"https://domain.actionkit.com/rest/v1/eventsignuppage/123/", params=None + 'https://domain.actionkit.com/rest/v1/eventsignuppage/123/', + params=None ) def test_create_event_signup_page(self): @@ -273,24 +222,25 @@ def test_create_event_signup_page(self): self.actionkit.conn = resp_mock self.actionkit.create_event_signup_page( - name="new_name", campaign_id="123", title="title" + name='new_name', + campaign_id='123', + title='title' ) self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventsignuppage/", - data=json.dumps( - { - "campaign": "/rest/v1/campaign/123/", - "name": "new_name", - "title": "title", - } - ), + 'https://domain.actionkit.com/rest/v1/eventsignuppage/', + data=json.dumps({ + 'campaign': '/rest/v1/campaign/123/', + 'name': 'new_name', + 'title': 'title' + }) ) def test_get_event_signup_form(self): # Test get event signup form self.actionkit.get_event_signup_form(123) self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventsignupform/123/", params=None + 'https://domain.actionkit.com/rest/v1/eventsignupform/123/', + params=None ) def test_create_event_signup_form(self): @@ -302,13 +252,15 @@ def test_create_event_signup_form(self): self.actionkit.conn = resp_mock self.actionkit.create_event_signup_form( - page_id="123", thank_you_text="thank you" + page_id='123', + thank_you_text='thank you' ) self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventsignupform/", - data=json.dumps( - {"page": "/rest/v1/page/123/", "thank_you_text": "thank you"} - ), + 'https://domain.actionkit.com/rest/v1/eventsignupform/', + data=json.dumps({ + 'page': '/rest/v1/page/123/', + 'thank_you_text': 'thank you' + }) ) def test_update_event_signup(self): @@ -318,17 +270,18 @@ def test_update_event_signup(self): resp_mock = mock.MagicMock() type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202) self.actionkit.conn = resp_mock - self.actionkit.update_event_signup(123, email="test") + self.actionkit.update_event_signup(123, email='test') self.actionkit.conn.patch.assert_called_with( - "https://domain.actionkit.com/rest/v1/eventsignup/123/", - data=json.dumps({"email": "test"}), + 'https://domain.actionkit.com/rest/v1/eventsignup/123/', + data=json.dumps({'email': 'test'}) ) def test_get_mailer(self): # Test get mailer self.actionkit.get_mailer(123) self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/mailer/123/", params=None + 'https://domain.actionkit.com/rest/v1/mailer/123/', + params=None ) def test_create_mailer(self): @@ -340,19 +293,14 @@ def test_create_mailer(self): self.actionkit.conn = resp_mock self.actionkit.create_mailer( - fromline="test ", - subjects=["test1", "test2"], - html="
<html><body>test</body></html>", + fromline='test ', subjects=['test1', 'test2'], html='<html><body>test</body></html>
' ) self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/mailer/", - data=json.dumps( - { - "fromline": "test ", - "subjects": ["test1", "test2"], - "html": "<html><body>test</body></html>
", - } - ), + 'https://domain.actionkit.com/rest/v1/mailer/', + data=json.dumps({ + 'fromline': 'test ', 'subjects': ['test1', 'test2'], + 'html': '<html><body>test</body></html>
' + }) ) def test_rebuild_mailer(self): @@ -365,8 +313,8 @@ def test_rebuild_mailer(self): self.actionkit.rebuild_mailer(123) self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/mailer/123/rebuild/", - data=json.dumps({}), + 'https://domain.actionkit.com/rest/v1/mailer/123/rebuild/', + data=json.dumps({}) ) def test_queue_mailer(self): @@ -379,73 +327,8 @@ def test_queue_mailer(self): self.actionkit.queue_mailer(123) self.actionkit.conn.post.assert_called_with( - "https://domain.actionkit.com/rest/v1/mailer/123/queue/", - data=json.dumps({}), - ) - - def test_paginated_get(self): - # Test paginated_get - resp_mock = mock.MagicMock() - first_mock = mock.MagicMock() - second_mock = mock.MagicMock() - first_mock.status_code = 201 - first_mock.json = lambda: { - "meta": {"next": "/rest/v1/user/abc"}, - "objects": list(map(lambda x: {"value": x}, [*range(100)])), - } - second_mock.status_code = 201 - second_mock.json = lambda: { - "meta": {"next": "/rest/v1/user/def"}, - "objects": list(map(lambda x: {"value": x}, [*range(100, 200)])), - } - resp_mock.get.side_effect = [first_mock, second_mock] - self.actionkit.conn = resp_mock - results = self.actionkit.paginated_get("user", 150, order_by="created_at") - self.assertEqual(results.num_rows, 150) - calls = [ - unittest.mock.call( - "https://domain.actionkit.com/rest/v1/user/", - params={"order_by": "created_at", "_limit": 100}, - ), - unittest.mock.call("https://domain.actionkit.com/rest/v1/user/abc"), - ] - self.actionkit.conn.get.assert_has_calls(calls) - - def test_paginated_get_custom_limit(self): - # Test paginated_get - resp_mock = mock.MagicMock() - first_mock = mock.MagicMock() - second_mock = mock.MagicMock() - first_mock.status_code = 201 - first_mock.json = lambda: { - "meta": {"next": "/rest/v1/user/abc"}, - "objects": list(map(lambda x: {"value": x}, [*range(100)])), - } - second_mock.status_code = 201 - second_mock.json = lambda: { - "meta": {"next": "/rest/v1/user/def"}, - "objects": list(map(lambda x: {"value": x}, [*range(100, 200)])), - } - resp_mock.get.side_effect = [first_mock, second_mock] - self.actionkit.conn = resp_mock - results = self.actionkit.paginated_get_custom_limit("user", 150, "value", 102) - self.assertEqual(results.num_rows, 102) - self.assertEqual(results.column_data("value")[0], 0) - self.assertEqual(results.column_data("value")[-1], 101) - calls = [ - unittest.mock.call( - "https://domain.actionkit.com/rest/v1/user/", - params={"order_by": "value", "_limit": 100}, - ), - unittest.mock.call("https://domain.actionkit.com/rest/v1/user/abc"), - ] - self.actionkit.conn.get.assert_has_calls(calls) - - def test_get_order(self): - # Test get order - self.actionkit.get_order(123) - self.actionkit.conn.get.assert_called_with( - "https://domain.actionkit.com/rest/v1/order/123/", params=None + 'https://domain.actionkit.com/rest/v1/mailer/123/queue/', + data=json.dumps({}) ) def test_update_order(self): @@ -455,44 +338,17 @@ def test_update_order(self): resp_mock = mock.MagicMock() type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202) self.actionkit.conn = resp_mock - self.actionkit.update_order(123, account="test") + self.actionkit.update_order(123, account='test') self.actionkit.conn.patch.assert_called_with( - "https://domain.actionkit.com/rest/v1/order/123/", - data=json.dumps({"account": "test"}), - ) - - def test_get_orders(self): - # Test get orders - resp_mock = mock.MagicMock() - type(resp_mock.get()).status_code = mock.PropertyMock(return_value=201) - 
type(resp_mock.get()).json = lambda x: {"meta": {"next": ""}, "objects": []}
-        self.actionkit.conn = resp_mock
-
-        self.actionkit.get_orders(100, order_by="created_at")
-        self.actionkit.conn.get.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/order/",
-            params={"order_by": "created_at", "_limit": 100},
-        )
-
-    def test_update_paymenttoken(self):
-        # Test update payment token
-
-        # Mock resp and status code
-        resp_mock = mock.MagicMock()
-        type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202)
-        self.actionkit.conn = resp_mock
-
-        self.actionkit.update_paymenttoken(1, status="inactive")
-        self.actionkit.conn.patch.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/paymenttoken/1/",
-            data=json.dumps({"status": "inactive"}),
+            'https://domain.actionkit.com/rest/v1/order/123/', data=json.dumps({'account': 'test'})
         )
 
     def test_get_page_followup(self):
         # Test get page followup
         self.actionkit.get_page_followup(123)
         self.actionkit.conn.get.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/pagefollowup/123/", params=None
+            'https://domain.actionkit.com/rest/v1/pagefollowup/123/',
+            params=None
         )
 
     def test_create_page_followup(self):
@@ -503,17 +359,23 @@ def test_create_page_followup(self):
         type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201)
         self.actionkit.conn = resp_mock
 
-        self.actionkit.create_page_followup(signup_page_id="123", url="url")
+        self.actionkit.create_page_followup(
+            signup_page_id='123',
+            url='url'
+        )
         self.actionkit.conn.post.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/pagefollowup/",
-            data=json.dumps({"page": "/rest/v1/eventsignuppage/123/", "url": "url"}),
+            'https://domain.actionkit.com/rest/v1/pagefollowup/',
+            data=json.dumps({
+                'page': '/rest/v1/eventsignuppage/123/',
+                'url': 'url'
+            })
         )
 
     def test_get_survey_question(self):
         # Test get survey question
         self.actionkit.get_survey_question(123)
         self.actionkit.conn.get.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/surveyquestion/123/", params=None
+            'https://domain.actionkit.com/rest/v1/surveyquestion/123/', params=None
         )
 
     def test_update_survey_question(self):
@@ -523,87 +385,10 @@ def test_update_survey_question(self):
         resp_mock = mock.MagicMock()
         type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202)
         self.actionkit.conn = resp_mock
-        self.actionkit.update_survey_question(123, question_html="test")
-        self.actionkit.conn.patch.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/surveyquestion/123/",
-            data=json.dumps({"question_html": "test"}),
-        )
-
-    def test_get_orderrecurring(self):
-        # Test get orderrecurring
-        self.actionkit.get_orderrecurring(123)
-        self.actionkit.conn.get.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/orderrecurring/123/", params=None
-        )
-
-    def test_cancel_orderrecurring(self):
-        # Test cancel recurring order
-
-        # Mock resp and status code
-        resp_mock = mock.MagicMock()
-        type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201)
-        self.actionkit.conn = resp_mock
-
-        self.actionkit.cancel_orderrecurring(1)
-        self.actionkit.conn.post.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/orderrecurring/1/cancel/"
-        )
-
-    def test_update_orderrecurring(self):
-        # Test update orderrecurring
-
-        # Mock resp and status code
-        resp_mock = mock.MagicMock()
-        type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202)
-        self.actionkit.conn = resp_mock
-        self.actionkit.update_orderrecurring(123, amount="1.00")
+        self.actionkit.update_survey_question(123, question_html='test')
         self.actionkit.conn.patch.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/orderrecurring/123/",
-            data=json.dumps({"amount": "1.00"}),
-        )
-
-    def test_create_transaction(self):
-        # Test create transaction
-
-        # Mock resp and status code
-        resp_mock = mock.MagicMock()
-        type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201)
-        self.actionkit.conn = resp_mock
-
-        self.actionkit.create_transaction(
-            account="Account",
-            amount=1,
-            amount_converted=1,
-            currency="USD",
-            failure_code="",
-            failure_description="",
-            failure_message="",
-            order="/rest/v1/order/1/",
-            status="completed",
-            success=True,
-            test_mode=False,
-            trans_id="abc123",
-            type="sale",
-        )
-        self.actionkit.conn.post.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/transaction/",
-            data=json.dumps(
-                {
-                    "account": "Account",
-                    "amount": 1,
-                    "amount_converted": 1,
-                    "currency": "USD",
-                    "failure_code": "",
-                    "failure_description": "",
-                    "failure_message": "",
-                    "order": "/rest/v1/order/1/",
-                    "status": "completed",
-                    "success": True,
-                    "test_mode": False,
-                    "trans_id": "abc123",
-                    "type": "sale",
-                }
-            ),
+            'https://domain.actionkit.com/rest/v1/surveyquestion/123/',
+            data=json.dumps({'question_html': 'test'})
         )
 
     def test_update_transaction(self):
@@ -613,23 +398,10 @@ def test_update_transaction(self):
         resp_mock = mock.MagicMock()
         type(resp_mock.patch()).status_code = mock.PropertyMock(return_value=202)
         self.actionkit.conn = resp_mock
-        self.actionkit.update_transaction(123, account="test")
+        self.actionkit.update_transaction(123, account='test')
         self.actionkit.conn.patch.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/transaction/123/",
-            data=json.dumps({"account": "test"}),
-        )
-
-    def test_get_transactions(self):
-        # Test get transactions
-        resp_mock = mock.MagicMock()
-        type(resp_mock.get()).status_code = mock.PropertyMock(return_value=201)
-        type(resp_mock.get()).json = lambda x: {"meta": {"next": ""}, "objects": []}
-        self.actionkit.conn = resp_mock
-
-        self.actionkit.get_transactions(100, order_by="created_at")
-        self.actionkit.conn.get.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/transaction/",
-            params={"order_by": "created_at", "_limit": 100},
+            'https://domain.actionkit.com/rest/v1/transaction/123/',
+            data=json.dumps({'account': 'test'})
         )
 
     def test_create_generic_action(self):
@@ -639,83 +411,63 @@ def test_create_generic_action(self):
         type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201)
         self.actionkit.conn = resp_mock
 
-        self.actionkit.create_generic_action(email="bob@bob.com", page="my_action")
+        self.actionkit.create_generic_action(email='bob@bob.com', page='my_action')
         self.actionkit.conn.post.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/action/",
-            data=json.dumps({"email": "bob@bob.com", "page": "my_action"}),
-        )
+            'https://domain.actionkit.com/rest/v1/action/',
+            data=json.dumps({'email': 'bob@bob.com', 'page': 'my_action'}))
 
     def test_bulk_upload_table(self):
         resp_mock = mock.MagicMock()
         type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201)
         self.actionkit._conn = lambda self: resp_mock
         self.actionkit.bulk_upload_table(
-            Table(
-                [
-                    ("user_id", "user_customfield1", "action_foo"),
-                    (5, "yes", "123 Main St"),
-                ]
-            ),
-            "fake_page",
-        )
+            Table([('user_id', 'user_customfield1', 'action_foo'), (5, 'yes', '123 Main St')]),
+            'fake_page')
         self.assertEqual(resp_mock.post.call_count, 2)
         name, args, kwargs = resp_mock.method_calls[1]
-        self.assertEqual(
-            kwargs["data"],
-            {"page": "fake_page", "autocreate_user_fields": 0, "user_fields_only": 0},
-        )
-        upload_data = kwargs["files"]["upload"].read()
-        self.assertEqual(
-            upload_data.decode(),
-            "user_id,user_customfield1,action_foo\r\n5,yes,123 Main St\r\n",
-        )
+        self.assertEqual(kwargs['data'],
+                         {'page': 'fake_page', 'autocreate_user_fields': 0, 'user_fields_only': 0})
+        upload_data = kwargs['files']['upload'].read()
+        self.assertEqual(upload_data.decode(),
+                         'user_id,user_customfield1,action_foo\r\n5,yes,123 Main St\r\n')
 
     def test_bulk_upload_table_userfields(self):
         resp_mock = mock.MagicMock()
         type(resp_mock.post()).status_code = mock.PropertyMock(return_value=201)
         self.actionkit._conn = lambda self: resp_mock
         self.actionkit.bulk_upload_table(
-            Table([("user_id", "user_customfield1"), (5, "yes")]), "fake_page"
-        )
+            Table([('user_id', 'user_customfield1'), (5, 'yes')]),
+            'fake_page')
         self.assertEqual(resp_mock.post.call_count, 2)
         name, args, kwargs = resp_mock.method_calls[1]
-        self.assertEqual(
-            kwargs["data"],
-            {"page": "fake_page", "autocreate_user_fields": 0, "user_fields_only": 1},
-        )
-        self.assertEqual(
-            kwargs["files"]["upload"].read().decode(),
-            "user_id,user_customfield1\r\n5,yes\r\n",
-        )
+        self.assertEqual(kwargs['data'],
+                         {'page': 'fake_page', 'autocreate_user_fields': 0, 'user_fields_only': 1})
+        self.assertEqual(kwargs['files']['upload'].read().decode(),
+                         'user_id,user_customfield1\r\n5,yes\r\n')
 
     def test_table_split(self):
-        test1 = Table([("x", "y", "z"), ("a", "b", ""), ("1", "", "3"), ("4", "", "6")])
+        test1 = Table([('x', 'y', 'z'), ('a', 'b', ''), ('1', '', '3'), ('4', '', '6')])
         tables = self.actionkit._split_tables_no_empties(test1, True, [])
         self.assertEqual(len(tables), 2)
-        assert_matching_tables(tables[0], Table([("x", "y"), ("a", "b")]))
-        assert_matching_tables(tables[1], Table([("x", "z"), ("1", "3"), ("4", "6")]))
+        assert_matching_tables(tables[0], Table([('x', 'y'), ('a', 'b')]))
+        assert_matching_tables(tables[1], Table([('x', 'z'), ('1', '3'), ('4', '6')]))
 
-        test2 = Table(
-            [("x", "y", "z"), ("a", "b", "c"), ("1", "2", "3"), ("4", "5", "6")]
-        )
+        test2 = Table([('x', 'y', 'z'), ('a', 'b', 'c'), ('1', '2', '3'), ('4', '5', '6')])
         tables2 = self.actionkit._split_tables_no_empties(test2, True, [])
         self.assertEqual(len(tables2), 1)
         assert_matching_tables(tables2[0], test2)
 
-        test3 = Table(
-            [("x", "y", "z"), ("a", "b", ""), ("1", "2", "3"), ("4", "5", "6")]
-        )
-        tables3 = self.actionkit._split_tables_no_empties(test3, False, ["z"])
+        test3 = Table([('x', 'y', 'z'), ('a', 'b', ''), ('1', '2', '3'), ('4', '5', '6')])
+        tables3 = self.actionkit._split_tables_no_empties(test3, False, ['z'])
         self.assertEqual(len(tables3), 2)
-        assert_matching_tables(tables3[0], Table([("x", "y"), ("a", "b")]))
-        assert_matching_tables(
-            tables3[1], Table([("x", "y", "z"), ("1", "2", "3"), ("4", "5", "6")])
-        )
+        assert_matching_tables(tables3[0], Table([('x', 'y'), ('a', 'b')]))
+        assert_matching_tables(tables3[1],
+                               Table([('x', 'y', 'z'), ('1', '2', '3'), ('4', '5', '6')]))
 
     def test_collect_errors(self):
-        self.actionkit.collect_upload_errors([{"id": "12345"}])
+        self.actionkit.collect_upload_errors([{'id': '12345'}])
         self.actionkit.conn.get.assert_called_with(
-            "https://domain.actionkit.com/rest/v1/uploaderror/",
-            params={"upload": "12345"},
+            'https://domain.actionkit.com/rest/v1/uploaderror/',
+            params={'upload': '12345'}
         )
diff --git a/test/test_action_network/test_action_network.py
b/test/test_action_network/test_action_network.py index c6984d3861..1a8a5ef294 100644 --- a/test/test_action_network/test_action_network.py +++ b/test/test_action_network/test_action_network.py @@ -6,439 +6,270 @@ class TestActionNetwork(unittest.TestCase): + @requests_mock.Mocker() def setUp(self, m): - self.api_url = "https://actionnetwork.org/api/v2" + self.api_url = 'https://actionnetwork.org/api/v2' self.api_key = "fake_key" self.an = ActionNetwork(self.api_key) - self.fake_datetime = "2019-02-29T00:00:00.000+0000" - self.fake_date = "2019-02-29" - self.fake_customer_email_1 = "fake_customer_email_1@fake_customer_email.com" - self.fake_customer_email_2 = "fake_customer_email_2@fake_customer_email.com" - self.fake_filter_by_email_1 = f"filter eq '{self.fake_customer_email_1}'" - self.fake_person_id_1 = "action_network:fake_person_id_1" - self.fake_person_id_2 = "action_network:fake_person_id_2" + self.fake_datetime = '2019-02-29T00:00:00.000+0000' + self.fake_date = '2019-02-29' + self.fake_customer_email_1 = 'fake_customer_email_1@fake_customer_email.com' + self.fake_customer_email_2 = 'fake_customer_email_2@fake_customer_email.com' + self.fake_person_id_1 = "fake_person_id_1" + self.fake_person_id_2 = "fake_person_id_2" self.fake_tag_id_1 = "fake_tag_id_1" self.fake_tag_id_2 = "fake_tag_id_2" - self.fake_tag_filter = "name eq 'fake_tag_1'" self.fake_people_list_1 = { - "per_page": 2, - "page": 1, - "_links": { - "next": {"href": f"{self.api_url}/people?page=2"}, - "osdi:people": [ - {"href": f"{self.api_url}/{self.fake_person_id_1}"}, - {"href": f"{self.api_url}/{self.fake_person_id_2}"}, - ], - "curies": [ - {"name": "osdi", "templated": True}, - {"name": "action_network", "templated": True}, - ], - "self": {"href": f"{self.api_url}/people"}, - }, - "_embedded": { - "osdi:people": [ - { - "given_name": "Fakey", - "family_name": "McFakerson", - "identifiers": [self.fake_person_id_1], - "email_addresses": [ - { - "primary": True, - "address": self.fake_customer_email_1, - "status": "subscribed", - } - ], - "postal_addresses": [ - { - "primary": True, - "region": "", - "country": "US", - "location": { - "latitude": None, - "longitude": None, - "accuracy": None, - }, - } - ], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "languages_spoken": ["en"], - }, - { - "given_name": "Faker", - "family_name": "McEvenFakerson", - "identifiers": [self.fake_person_id_2], - "email_addresses": [ - { - "primary": True, - "address": self.fake_customer_email_2, - "status": "subscribed", - } - ], - "postal_addresses": [ - { - "primary": True, - "region": "", - "country": "US", - "location": { - "latitude": None, - "longitude": None, - "accuracy": None, - }, - } - ], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "languages_spoken": ["en"], - }, - ] - }, - } + 'per_page': 2, + 'page': 1, + '_links': {'next': {'href': f"{self.api_url}/people?page=2"}, + 'osdi:people': [{'href': f"{self.api_url}/{self.fake_person_id_1}"}, + {'href': f"{self.api_url}/{self.fake_person_id_2}"}], + 'curies': [{'name': 'osdi', + 'templated': True}, + {'name': 'action_network', + 'templated': True}], + 'self': {'href': f"{self.api_url}/people"}}, + '_embedded': {'osdi:people': [{'given_name': 'Fakey', + 'family_name': 'McFakerson', + 'identifiers': [self.fake_person_id_1], + 'email_addresses': [{'primary': True, + 'address': self.fake_customer_email_1, + 'status': 'subscribed'}], + 'postal_addresses': [{'primary': True, + 'region': '', + 'country': 'US', + 
'location': {'latitude': None, + 'longitude': None, + 'accuracy': None}}], + 'created_date': self.fake_datetime, + 'modified_date': self.fake_datetime, + 'languages_spoken': ['en']}, + {'given_name': 'Faker', + 'family_name': 'McEvenFakerson', + 'identifiers': [self.fake_person_id_2], + 'email_addresses': [{'primary': True, + 'address': self.fake_customer_email_2, + 'status': 'subscribed'}], + 'postal_addresses': [{'primary': True, + 'region': '', + 'country': 'US', + 'location': {'latitude': None, + 'longitude': None, + 'accuracy': None}}], + 'created_date': self.fake_datetime, + 'modified_date': self.fake_datetime, + 'languages_spoken': ['en']}]}} self.fake_people_list_2 = { - "per_page": 2, - "page": 2, - "_links": { - "next": {"href": f"{self.api_url}/people?page=3"}, - "osdi:people": [ - {"href": f"{self.api_url}/{self.fake_person_id_1}"}, - {"href": f"{self.api_url}/{self.fake_person_id_2}"}, - ], - "curies": [ - {"name": "osdi", "templated": True}, - {"name": "action_network", "templated": True}, - ], - "self": {"href": f"{self.api_url}/people"}, - }, - "_embedded": { - "osdi:people": [ - { - "given_name": "Fakey", - "family_name": "McFakerson", - "identifiers": [self.fake_person_id_1], - "email_addresses": [ - { - "primary": True, - "address": self.fake_customer_email_1, - "status": "subscribed", - } - ], - "postal_addresses": [ - { - "primary": True, - "region": "", - "country": "US", - "location": { - "latitude": None, - "longitude": None, - "accuracy": None, - }, - } - ], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "languages_spoken": ["en"], - }, - { - "given_name": "Faker", - "family_name": "McEvenFakerson", - "identifiers": [self.fake_person_id_2], - "email_addresses": [ - { - "primary": True, - "address": self.fake_customer_email_2, - "status": "subscribed", - } - ], - "postal_addresses": [ - { - "primary": True, - "region": "", - "country": "US", - "location": { - "latitude": None, - "longitude": None, - "accuracy": None, - }, - } - ], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "languages_spoken": ["en"], - }, - ] - }, - } - self.fake_people_list = ( - self.fake_people_list_1["_embedded"]["osdi:people"] - + self.fake_people_list_2["_embedded"]["osdi:people"] - ) + 'per_page': 2, + 'page': 2, + '_links': {'next': {'href': f"{self.api_url}/people?page=3"}, + 'osdi:people': [{'href': f"{self.api_url}/{self.fake_person_id_1}"}, + {'href': f"{self.api_url}/{self.fake_person_id_2}"}], + 'curies': [{'name': 'osdi', + 'templated': True}, + {'name': 'action_network', + 'templated': True}], + 'self': {'href': f"{self.api_url}/people"}}, + '_embedded': {'osdi:people': [{'given_name': 'Fakey', + 'family_name': 'McFakerson', + 'identifiers': [self.fake_person_id_1], + 'email_addresses': [{'primary': True, + 'address': self.fake_customer_email_1, + 'status': 'subscribed'}], + 'postal_addresses': [{'primary': True, + 'region': '', + 'country': 'US', + 'location': {'latitude': None, + 'longitude': None, + 'accuracy': None}}], + 'created_date': self.fake_datetime, + 'modified_date': self.fake_datetime, + 'languages_spoken': ['en']}, + {'given_name': 'Faker', + 'family_name': 'McEvenFakerson', + 'identifiers': [self.fake_person_id_2], + 'email_addresses': [{'primary': True, + 'address': self.fake_customer_email_2, + 'status': 'subscribed'}], + 'postal_addresses': [{'primary': True, + 'region': '', + 'country': 'US', + 'location': {'latitude': None, + 'longitude': None, + 'accuracy': None}}], + 'created_date': 
self.fake_datetime, + 'modified_date': self.fake_datetime, + 'languages_spoken': ['en']}]}} + self.fake_people_list = (self.fake_people_list_1['_embedded']['osdi:people'] + + self.fake_people_list_2['_embedded']['osdi:people']) self.fake_tag_list = { - "total_pages": 1, - "per_page": 2, - "page": 1, - "total_records": 2, - "_links": { - "next": {"href": f"{self.api_url}/tags?page=2"}, - "osdi:tags": [ - {"href": f"{self.api_url}/tags/{self.fake_tag_id_1}"}, - {"href": f"{self.api_url}/tags/{self.fake_tag_id_2}"}, - ], - "curies": [ - {"name": "osdi", "templated": True}, - {"name": "action_network", "templated": True}, - ], - "self": {"href": f"{self.api_url}/tags"}, - }, - "_embedded": { - "osdi:tags": [ - { - "name": "fake_tag_1", - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "identifiers": [self.fake_tag_id_1], - "_links": {"self": {"href": self.fake_tag_id_1}}, - }, - { - "name": "fake_tag_2", - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "identifiers": [self.fake_tag_id_1], - "_links": {"self": {"href": self.fake_tag_id_1}}, - }, - ] - }, - } - self.fake_upsert_person = { - "given_name": "Fakey", - "family_name": "McFakerson", - "identifiers": [self.fake_person_id_1], - "email_address": [ - { - "primary": True, - "address": "fakey@mcfakerson.com", - "status": "unsubscribed", - } - ], - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - } + 'total_pages': 1, + 'per_page': 2, + 'page': 1, + 'total_records': 2, + '_links': {'next': {'href': f"{self.api_url}/tags?page=2"}, + 'osdi:tags': [{'href': f"{self.api_url}/tags/{self.fake_tag_id_1}"}, + {'href': f"{self.api_url}/tags/{self.fake_tag_id_2}"}], + 'curies': [{'name': 'osdi', + 'templated': True}, + {'name': 'action_network', + 'templated': True}], + 'self': {'href': f"{self.api_url}/tags"}}, + '_embedded': {'osdi:tags': [{'name': "fake_tag_1", + 'created_date': self.fake_datetime, + 'modified_date': self.fake_datetime, + 'identifiers': [self.fake_tag_id_1], + '_links': + {'self': {'href': self.fake_tag_id_1}}}, + {'name': "fake_tag_2", + 'created_date': self.fake_datetime, + 'modified_date': self.fake_datetime, + 'identifiers': [self.fake_tag_id_1], + '_links': {'self': {'href': self.fake_tag_id_1}}}]}} self.fake_person = [ - { - "given_name": "Fakey", - "family_name": "McFakerson", - "identifiers": [self.fake_person_id_1], - "email_addresses": [ - { - "primary": True, - "address": "fakey@mcfakerson.com", - "status": "unsubscribed", - } - ], - "postal_addresses": [ - { - "primary": True, - "locality": "Washington", - "region": "DC", - "postal_code": "20009", - "country": "US", - "location": { - "latitude": 38.919, - "longitude": -77.0378, - "accuracy": None, - }, - } - ], - "_links": { - "self": {"href": "fake_url"}, - "osdi:signatures": {"href": "fake_url"}, - "osdi:submissions": {"href": "fake_url"}, - "osdi:donations": {"href": "fake_url"}, - "curies": [ - {"name": "osdi", "href": "fake_url", "templated": True}, - { - "name": "action_network", - "href": "fake_url", - "templated": True, - }, - ], - "osdi:taggings": {"href": "fake_url"}, - "osdi:outreaches": {"href": "fake_url"}, - "osdi:attendances": {"href": "fake_url"}, - }, - "custom_fields": {}, - "created_date": self.fake_date, - "modified_date": self.fake_date, - "languages_spoken": ["en"], - } - ] + {'given_name': 'Fakey', + 'family_name': 'McFakerson', + 'identifiers': [self.fake_person_id_1], + 'email_addresses': [{'primary': True, + 'address': 'fakey@mcfakerson.com', + 'status': 
'unsubscribed'}], + 'postal_addresses': [{'primary': True, + 'locality': 'Washington', + 'region': 'DC', + 'postal_code': '20009', + 'country': 'US', + 'location': {'latitude': 38.919, + 'longitude': -77.0378, + 'accuracy': None}}], + '_links': {'self': {'href': 'fake_url'}, + 'osdi:signatures': {'href': 'fake_url'}, + 'osdi:submissions': {'href': 'fake_url'}, + 'osdi:donations': {'href': 'fake_url'}, + 'curies': [{'name': 'osdi', + 'href': 'fake_url', + 'templated': True}, + {'name': 'action_network', + 'href': 'fake_url', + 'templated': True}], + 'osdi:taggings': {'href': 'fake_url'}, + 'osdi:outreaches': {'href': 'fake_url'}, + 'osdi:attendances': {'href': 'fake_url'}}, + 'custom_fields': {}, + 'created_date': self.fake_date, + 'modified_date': self.fake_date, + 'languages_spoken': ['en']}] self.updated_fake_person = [ - { - "given_name": "Flakey", - "family_name": "McFlakerson", - "identifiers": [self.fake_person_id_1], - "email_addresses": [ - { - "primary": True, - "address": "fakey@mcfakerson.com", - "status": "unsubscribed", - } - ], - "postal_addresses": [ - { - "primary": True, - "locality": "Washington", - "region": "DC", - "postal_code": "20009", - "country": "US", - "location": { - "latitude": 38.919, - "longitude": -77.0378, - "accuracy": None, - }, - } - ], - "_links": { - "self": {"href": "fake_url"}, - "osdi:signatures": {"href": "fake_url"}, - "osdi:submissions": {"href": "fake_url"}, - "osdi:donations": {"href": "fake_url"}, - "curies": [ - {"name": "osdi", "href": "fake_url", "templated": True}, - { - "name": "action_network", - "href": "fake_url", - "templated": True, - }, - ], - "osdi:taggings": {"href": "fake_url"}, - "osdi:outreaches": {"href": "fake_url"}, - "osdi:attendances": {"href": "fake_url"}, - }, - "custom_fields": {}, - "created_date": self.fake_date, - "modified_date": self.fake_date, - "languages_spoken": ["en"], - } - ] - self.fake_tag = { - "name": "fake_tag_1", - "created_date": self.fake_datetime, - "modified_date": self.fake_datetime, - "identifiers": [self.fake_tag_id_1], - "_links": {"self": {"href": self.fake_tag_id_1}}, - } + {'given_name': 'Flakey', + 'family_name': 'McFlakerson', + 'identifiers': [self.fake_person_id_1], + 'email_addresses': [{'primary': True, + 'address': 'fakey@mcfakerson.com', + 'status': 'unsubscribed'}], + 'postal_addresses': [{'primary': True, + 'locality': 'Washington', + 'region': 'DC', + 'postal_code': '20009', + 'country': 'US', + 'location': {'latitude': 38.919, + 'longitude': -77.0378, + 'accuracy': None}}], + '_links': {'self': {'href': 'fake_url'}, + 'osdi:signatures': {'href': 'fake_url'}, + 'osdi:submissions': {'href': 'fake_url'}, + 'osdi:donations': {'href': 'fake_url'}, + 'curies': [{'name': 'osdi', + 'href': 'fake_url', + 'templated': True}, + {'name': 'action_network', + 'href': 'fake_url', + 'templated': True}], + 'osdi:taggings': {'href': 'fake_url'}, + 'osdi:outreaches': {'href': 'fake_url'}, + 'osdi:attendances': {'href': 'fake_url'}}, + 'custom_fields': {}, + 'created_date': self.fake_date, + 'modified_date': self.fake_date, + 'languages_spoken': ['en']}] + self.fake_tag = {'name': "fake_tag_1", + 'created_date': self.fake_datetime, + 'modified_date': self.fake_datetime, + 'identifiers': [self.fake_tag_id_1], + '_links': {'self': {'href': self.fake_tag_id_1}}} self.fake_location = { "venue": "White House", - "address_lines": ["1600 Pennsylvania Ave"], + "address_lines": [ + "1600 Pennsylvania Ave" + ], "locality": "Washington", "region": "DC", "postal_code": "20009", - "country": "US", + "country": 
"US" } self.fake_event = { "title": "fake_title", "start_date": self.fake_date, "location": self.fake_location, "_links": { - "self": {"href": "https://actionnetwork.org/api/v2/events/fake-id"}, + "self": { + "href": "https://actionnetwork.org/api/v2/events/fake-id" + }, }, - "event_id": "fake-id", + "event_id": "fake-id" } @requests_mock.Mocker() def test_get_page(self, m): - m.get( - f"{self.api_url}/people?page=2&per_page=2", - text=json.dumps(self.fake_people_list_2), - ) - self.assertEqual(self.an._get_page("people", 2, 2), self.fake_people_list_2) + m.get(f"{self.api_url}/people?page=2&per_page=2", text=json.dumps(self.fake_people_list_2)) + self.assertEqual(self.an._get_page('people', 2, 2), self.fake_people_list_2) @requests_mock.Mocker() def test_get_entry_list(self, m): - m.get( - f"{self.api_url}/people?page=1&per_page=25", - text=json.dumps(self.fake_people_list_1), - ) - m.get( - f"{self.api_url}/people?page=2&per_page=25", - text=json.dumps(self.fake_people_list_2), - ) - m.get( - f"{self.api_url}/people?page=3&per_page=25", - text=json.dumps({"_embedded": {"osdi:people": []}}), - ) - assert_matching_tables( - self.an._get_entry_list("people"), Table(self.fake_people_list) - ) + m.get(f"{self.api_url}/people?page=1&per_page=25", text=json.dumps(self.fake_people_list_1)) + m.get(f"{self.api_url}/people?page=2&per_page=25", text=json.dumps(self.fake_people_list_2)) + m.get(f"{self.api_url}/people?page=3&per_page=25", + text=json.dumps({'_embedded': {"osdi:people": []}})) + assert_matching_tables(self.an._get_entry_list('people'), + Table(self.fake_people_list)) @requests_mock.Mocker() def test_get_people(self, m): - m.get( - f"{self.api_url}/people?page=1&per_page=25", - text=json.dumps(self.fake_people_list_1), - ) - m.get( - f"{self.api_url}/people?page=2&per_page=25", - text=json.dumps(self.fake_people_list_2), - ) - m.get( - f"{self.api_url}/people?page=3&per_page=25", - text=json.dumps({"_embedded": {"osdi:people": []}}), - ) + m.get(f"{self.api_url}/people?page=1&per_page=25", text=json.dumps(self.fake_people_list_1)) + m.get(f"{self.api_url}/people?page=2&per_page=25", text=json.dumps(self.fake_people_list_2)) + m.get(f"{self.api_url}/people?page=3&per_page=25", + text=json.dumps({'_embedded': {"osdi:people": []}})) assert_matching_tables(self.an.get_people(), Table(self.fake_people_list)) @requests_mock.Mocker() def test_get_tags(self, m): - m.get( - f"{self.api_url}/tags?page=1&per_page=25", - text=json.dumps(self.fake_tag_list), - ) - m.get( - f"{self.api_url}/tags?page=2&per_page=25", - text=json.dumps({"_embedded": {"osdi:tags": []}}), - ) - assert_matching_tables( - self.an.get_tags(), Table(self.fake_tag_list["_embedded"]["osdi:tags"]) - ) + m.get(f"{self.api_url}/tags?page=1&per_page=25", text=json.dumps(self.fake_tag_list)) + m.get(f"{self.api_url}/tags?page=2&per_page=25", + text=json.dumps({'_embedded': {"osdi:tags": []}})) + assert_matching_tables(self.an.get_tags(), + Table(self.fake_tag_list['_embedded']['osdi:tags'])) @requests_mock.Mocker() def test_get_person(self, m): - m.get( - f"{self.api_url}/people/{self.fake_person_id_1}", - text=json.dumps(self.fake_person), - ) + m.get(f"{self.api_url}/people/{self.fake_person_id_1}", text=json.dumps(self.fake_person)) self.assertEqual(self.an.get_person(self.fake_person_id_1), self.fake_person) @requests_mock.Mocker() def test_get_tag(self, m): - m.get( - f"{self.api_url}/tags/{self.fake_tag_id_1}", text=json.dumps(self.fake_tag) - ) + m.get(f"{self.api_url}/tags/{self.fake_tag_id_1}", 
text=json.dumps(self.fake_tag))
         self.assertEqual(self.an.get_tag(self.fake_tag_id_1), self.fake_tag)
 
-    @requests_mock.Mocker()
-    def test_upsert_person(self, m):
-        m.post(f"{self.api_url}/people", text=json.dumps(self.fake_upsert_person))
-        self.assertEqual(
-            self.an.upsert_person(**self.fake_upsert_person), self.fake_upsert_person
-        )
-
     @requests_mock.Mocker()
     def test_update_person(self, m):
-        m.put(
-            f"{self.api_url}/people/{self.fake_person_id_1}",
-            text=json.dumps(self.updated_fake_person),
-        )
-        self.assertEqual(
-            self.an.update_person(
-                self.fake_person_id_1, given_name="Flake", family_name="McFlakerson"
-            ),
-            self.updated_fake_person,
-        )
+        m.put(f"{self.api_url}/people/{self.fake_person_id_1}",
+              text=json.dumps(self.updated_fake_person))
+        self.assertEqual(self.an.update_person(self.fake_person_id_1,
+                                               given_name='Flake',
+                                               family_name='McFlakerson'),
+                         self.updated_fake_person)
 
     @requests_mock.Mocker()
     def test_create_event(self, m):
@@ -447,58 +278,5 @@ def test_create_event(self, m):
             self.fake_event.items(),
             self.an.create_event(
                 "fake_title", start_date=self.fake_date, location=self.fake_location
-            ).items(),
-        )
-
-    @requests_mock.Mocker()
-    def test_filter_get_people(self, m):
-        m.get(
-            f"{self.api_url}/people?page=1&per_page=25&filter={self.fake_filter_by_email_1}",
-            text=json.dumps(self.fake_people_list_1),
-        )
-        m.get(
-            f"{self.api_url}/people?page=2&per_page=25&filter={self.fake_filter_by_email_1}",
-            text=json.dumps(self.fake_people_list_2),
-        )
-        m.get(
-            f"{self.api_url}/people?page=3&per_page=25&filter={self.fake_filter_by_email_1}",
-            text=json.dumps({"_embedded": {"osdi:people": []}}),
-        )
-        assert_matching_tables(
-            self.an.get_people(filter=self.fake_filter_by_email_1),
-            Table(self.fake_people_list),
-        )
-
-    @requests_mock.Mocker()
-    def test_filter_get_entry_list(self, m):
-        m.get(
-            f"{self.api_url}/people?page=1&per_page=25&filter={self.fake_filter_by_email_1}",
-            text=json.dumps(self.fake_people_list_1),
-        )
-        m.get(
-            f"{self.api_url}/people?page=2&per_page=25&filter={self.fake_filter_by_email_1}",
-            text=json.dumps(self.fake_people_list_2),
-        )
-        m.get(
-            f"{self.api_url}/people?page=3&per_page=25&filter={self.fake_filter_by_email_1}",
-            text=json.dumps({"_embedded": {"osdi:people": []}}),
-        )
-        assert_matching_tables(
-            self.an._get_entry_list("people", filter=self.fake_filter_by_email_1),
-            Table(self.fake_people_list),
-        )
-
-    @requests_mock.Mocker()
-    def test_filter_on_get_unsupported_entry(self, m):
-        m.get(
-            f"{self.api_url}/tags?page=1&per_page=25&filter={self.fake_tag_filter}",
-            text=json.dumps(self.fake_tag_list),
-        )
-        m.get(
-            f"{self.api_url}/tags?page=2&per_page=25&filter={self.fake_tag_filter}",
-            text=json.dumps({"_embedded": {"osdi:tags": []}}),
-        )
-        assert_matching_tables(
-            self.an._get_entry_list("tags", filter=self.fake_tag_filter),
-            Table(self.fake_tag_list["_embedded"]["osdi:tags"]),
+            ).items()
         )
diff --git a/test/test_airtable/airtable_responses.py b/test/test_airtable/airtable_responses.py
index e305ab197f..d6e6fb2d27 100644
--- a/test/test_airtable/airtable_responses.py
+++ b/test/test_airtable/airtable_responses.py
@@ -1,68 +1,51 @@
-records_response = {
-    "records": [
-        {
-            "id": "recaBMSHTgXREa5ef",
-            "fields": {"Name": "This is a row!"},
-            "createdTime": "2019-05-08T19:37:58.000Z",
-        },
-        {
-            "id": "recObtmLUrD5dOnmD",
-            "fields": {},
-            "createdTime": "2019-05-08T19:37:58.000Z",
-        },
-        {
-            "id": "recmeBNnj4cuHPOSI",
-            "fields": {},
-            "createdTime": "2019-05-08T19:37:58.000Z",
-        },
-    ]
-}
+records_response = {"records": [{"id": "recaBMSHTgXREa5ef",
+                                 "fields": {"Name": "This is a row!"},
+                                 "createdTime": "2019-05-08T19:37:58.000Z"},
+                                {"id": "recObtmLUrD5dOnmD",
+                                 "fields": {},
+                                 "createdTime": "2019-05-08T19:37:58.000Z"},
+                                {"id": "recmeBNnj4cuHPOSI",
+                                 "fields": {},
+                                 "createdTime": "2019-05-08T19:37:58.000Z"}
+                                ]}
 
-insert_response = {
-    "id": "recD4aEaEjQKYZABZ",
-    "fields": {"Name": "Another row!"},
-    "createdTime": "2019-05-13T16:28:18.000Z",
-}
+insert_response = {'id': 'recD4aEaEjQKYZABZ',
+                   'fields': {'Name': 'Another row!'},
+                   'createdTime': '2019-05-13T16:28:18.000Z'}
 
-insert_responses = [
-    {
-        "id": "recIYuf51JgbmHCHo",
-        "fields": {"Name": "Another!"},
-        "createdTime": "2019-05-13T16:37:03.000Z",
-    },
-    {
-        "id": "recJMqCfPwFVV5qfc",
-        "fields": {"Name": "Another row!"},
-        "createdTime": "2019-05-13T16:37:03.000Z",
-    },
-]
+insert_responses = [{'id': 'recIYuf51JgbmHCHo',
+                     'fields': {'Name': 'Another!'},
+                     'createdTime': '2019-05-13T16:37:03.000Z'},
+                    {'id': 'recJMqCfPwFVV5qfc',
+                     'fields': {'Name': 'Another row!'},
+                     'createdTime': '2019-05-13T16:37:03.000Z'}]
 
 records_response_with_more_columns = {
     "records": [
         {
             "id": "recaBMSHTgXREa5ef",
             "fields": {"Name": "This is a row!"},
-            "createdTime": "2019-05-08T19:37:58.000Z",
+            "createdTime": "2019-05-08T19:37:58.000Z"
         },
         {
             "id": "recaBMSHTgXvEa5ef",
             "fields": {"Name": "This is a row!"},
-            "createdTime": "2019-05-08T19:37:58.000Z",
+            "createdTime": "2019-05-08T19:37:58.000Z"
         },
         {
             "id": "recaBMSHTgXREsaef",
             "fields": {"Name": "This is a row!"},
-            "createdTime": "2019-05-08T19:37:58.000Z",
+            "createdTime": "2019-05-08T19:37:58.000Z"
         },
         {
             "id": "recObtmLUrD5dOnmD",
             "fields": {"Name": "This is a row!", "SecondColumn": ""},
-            "createdTime": "2019-05-08T19:37:58.000Z",
+            "createdTime": "2019-05-08T19:37:58.000Z"
         },
         {
             "id": "recmeBNnj4cuHPOSI",
             "fields": {"Name": "This is a row!", "SecondColumn": ""},
-            "createdTime": "2019-05-08T19:37:58.000Z",
-        },
+            "createdTime": "2019-05-08T19:37:58.000Z"
+        }
     ]
 }
diff --git a/test/test_airtable/test_airtable.py b/test/test_airtable/test_airtable.py
index e85db472b3..f4ea924c66 100644
--- a/test/test_airtable/test_airtable.py
+++ b/test/test_airtable/test_airtable.py
@@ -1,26 +1,24 @@
 import unittest
 import os
 import requests_mock
-from parsons import Airtable, Table
+from parsons.airtable import Airtable
+from parsons.etl import Table
 from test.utils import assert_matching_tables
-from airtable_responses import (
-    insert_response,
-    insert_responses,
-    records_response,
-    records_response_with_more_columns,
-)
+from airtable_responses import insert_response, insert_responses, \
+    records_response, records_response_with_more_columns
 
-os.environ["AIRTABLE_API_KEY"] = "SOME_KEY"
-BASE_KEY = "BASEKEY"
-TABLE_NAME = "TABLENAME"
+os.environ['AIRTABLE_API_KEY'] = 'SOME_KEY'
+BASE_KEY = 'BASEKEY'
+TABLE_NAME = 'TABLENAME'
 
 
 class TestAirtable(unittest.TestCase):
+
     @requests_mock.Mocker()
     def setUp(self, m):
-        self.base_uri = f"https://api.airtable.com/v0/{BASE_KEY}/{TABLE_NAME}"
+        self.base_uri = f'https://api.airtable.com/v0/{BASE_KEY}/{TABLE_NAME}'
 
         m.get(self.base_uri, status_code=200)
 
@@ -29,14 +27,12 @@ def setUp(self, m):
 
     @requests_mock.Mocker()
     def test_get_record(self, m):
-        record_id = "recObtmLUrD5dOnmD"
+        record_id = 'recObtmLUrD5dOnmD'
 
-        response = {
-            "id": "recObtmLUrD5dOnmD",
-            "fields": {},
-            "createdTime": "2019-05-08T19:37:58.000Z",
-        }
-        m.get(self.base_uri + "/" + record_id, json=response)
+        response = {'id': 'recObtmLUrD5dOnmD',
+                    'fields': {},
+                    'createdTime': '2019-05-08T19:37:58.000Z'}
+        m.get(self.base_uri + '/' + record_id, json=response)
 
         # Assert the method returns expected dict response
         self.assertEqual(self.at.get_record(record_id), response)
 
@@ -46,25 +42,15 @@ def test_get_records(self, m):
 
         m.get(self.base_uri, json=records_response)
 
-        tbl = Table(
-            [
-                {
-                    "id": "recaBMSHTgXREa5ef",
-                    "createdTime": "2019-05-08T19:37:58.000Z",
-                    "Name": "This is a row!",
-                },
-                {
-                    "id": "recObtmLUrD5dOnmD",
-                    "createdTime": "2019-05-08T19:37:58.000Z",
-                    "Name": None,
-                },
-                {
-                    "id": "recmeBNnj4cuHPOSI",
-                    "createdTime": "2019-05-08T19:37:58.000Z",
-                    "Name": None,
-                },
-            ]
-        )
+        tbl = Table([{'id': 'recObtmLUrD5dOnmD',
+                      'createdTime': '2019-05-08T19:37:58.000Z',
+                      'Name': None},
+                     {'id': 'recaBMSHTgXREa5ef',
+                      'createdTime': '2019-05-08T19:37:58.000Z',
+                      'Name': 'This is a row!'},
+                     {'id': 'recmeBNnj4cuHPOSI',
+                      'createdTime': '2019-05-08T19:37:58.000Z',
+                      'Name': None}])
 
         self.at.get_records(max_records=1)
 
         # Assert that Parsons tables match
@@ -77,7 +63,7 @@ def test_get_records_with_1_sample(self, m):
 
         airtable_res = self.at.get_records(sample_size=1)
 
-        assert airtable_res.columns == ["id", "createdTime", "Name"]
+        assert airtable_res.columns == ['id', 'createdTime', 'Name']
 
     @requests_mock.Mocker()
     def test_get_records_with_5_sample(self, m):
@@ -86,25 +72,25 @@ def test_get_records_with_5_sample(self, m):
 
         airtable_res = self.at.get_records(sample_size=5)
 
-        assert airtable_res.columns == ["id", "createdTime", "Name", "SecondColumn"]
+        assert airtable_res.columns == ['id', 'createdTime', 'Name', 'SecondColumn']
 
     @requests_mock.Mocker()
     def test_get_records_with_explicit_headers(self, m):
 
         m.get(self.base_uri, json=records_response_with_more_columns)
 
-        fields = ["Name", "SecondColumn"]
+        fields = ['Name', 'SecondColumn']
 
         airtable_res = self.at.get_records(fields, sample_size=1)
 
-        assert airtable_res.columns == ["id", "createdTime", "Name", "SecondColumn"]
+        assert airtable_res.columns == ['id', 'createdTime', 'Name', 'SecondColumn']
 
     @requests_mock.Mocker()
     def test_insert_record(self, m):
 
         m.post(self.base_uri, json=insert_response)
 
-        resp = self.at.insert_record({"Name": "Another row!"})
+        resp = self.at.insert_record({'Name': 'Another row!'})
 
         # Assert that returned dict conforms to expected.
self.assertEqual(resp, insert_response) @@ -114,7 +100,7 @@ def test_insert_records(self, m): m.post(self.base_uri, json=insert_responses) - tbl = Table([{"Name": "Another row!"}, {"Name": "Another!"}]) + tbl = Table([{'Name': 'Another row!'}, {'Name': 'Another!'}]) resp = self.at.insert_records(tbl) # Assert that row count is expected @@ -123,15 +109,13 @@ def test_insert_records(self, m): @requests_mock.Mocker() def test_update_records(self, m): - record_id = "recObtmLUrD5dOnmD" + record_id = 'recObtmLUrD5dOnmD' - response = { - "id": "recObtmLUrD5dOnmD", - "fields": {"Name": "AName"}, - "createdTime": "2019-05-13T17:36:28.000Z", - } + response = {'id': 'recObtmLUrD5dOnmD', + 'fields': {'Name': 'AName'}, + 'createdTime': '2019-05-13T17:36:28.000Z'} - m.patch(self.base_uri + "/" + record_id, json=response) + m.patch(self.base_uri + '/' + record_id, json=response) # Assert the method returns expected dict response - self.assertEqual(self.at.update_record(record_id, {"Name": "AName"}), response) + self.assertEqual(self.at.update_record(record_id, {'Name': 'AName'}), response) diff --git a/test/test_alchemer/test_getresponses.py b/test/test_alchemer/test_getresponses.py index 6e7dfb9b4f..bed98a8d41 100644 --- a/test/test_alchemer/test_getresponses.py +++ b/test/test_alchemer/test_getresponses.py @@ -1,7 +1,7 @@ import os import unittest import unittest.mock as mock -from parsons import Alchemer +from parsons.alchemer.alchemer import Alchemer import logging logger = logging.getLogger(__name__) @@ -9,9 +9,9 @@ class TestAlchemErGetResponses(unittest.TestCase): def setUp(self): - os.environ["ALCHEMER_API_TOKEN"] = "MYFAKEAPITOKEN" - os.environ["ALCHEMER_API_TOKEN_SECRET"] = "MYFAKETOKENSECRET" - os.environ["ALCHEMER_API_VERSION"] = "MYFAKEVERSION" + os.environ['ALCHEMER_API_TOKEN'] = 'MYFAKEAPITOKEN' + os.environ['ALCHEMER_API_TOKEN_SECRET'] = 'MYFAKETOKENSECRET' + os.environ['ALCHEMER_API_VERSION'] = 'MYFAKEVERSION' self.alchemer = Alchemer() self.alchemer._client = mock.MagicMock() @@ -34,123 +34,125 @@ def test_get_responses_single_page(self): self.assertEqual(2, actual_responses.num_rows) for i in range(0, 1): - self.assertEqual( - api_return["data"][i]["session_id"], actual_responses[i]["session_id"] - ) + self.assertEqual(api_return["data"][i]["session_id"], actual_responses[i]["session_id"]) def _get_responses_return_single_page(self): return { - "result_ok": True, - "total_count": 2, - "page": 1, - "total_pages": 1, - "results_per_page": 50, - "data": [ - { - "id": "1", - "contact_id": "", - "status": "Complete", - "is_test_data": "0", - "date_submitted": "2018-09-27 10:42:26 EDT", - "session_id": "1538059336_5bacec4869caa2.27680217", - "language": "English", - "date_started": "2018-09-27 10:42:16 EDT", - "link_id": "7473882", - "url_variables": [], - "ip_address": "50.232.185.226", - "referer": "https://app.surveygizmo.com/distribute/share/id/4599075", - "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36", # noqa - "response_time": 10, - "data_quality": [], - "longitude": "-105.20369720459", - "latitude": "40.050701141357", - "country": "United States", - "city": "Boulder", - "region": "CO", - "postal": "80301", - "dma": "751", - "survey_data": { - "2": { - "id": 2, - "type": "RADIO", - "question": "Will you attend the event?", - "section_id": 1, - "original_answer": "Yes", - "answer": "1", - "answer_id": 10001, - "shown": True, + "result_ok": True, + "total_count": 2, + "page": 1, + "total_pages": 1, 
+ "results_per_page": 50, + "data": [ + { + "id": "1", + "contact_id": "", + "status": "Complete", + "is_test_data": "0", + "date_submitted": "2018-09-27 10:42:26 EDT", + "session_id": "1538059336_5bacec4869caa2.27680217", + "language": "English", + "date_started": "2018-09-27 10:42:16 EDT", + "link_id": "7473882", + "url_variables": [], + "ip_address": "50.232.185.226", + "referer": "https://app.surveygizmo.com/distribute/share/id/4599075", + "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36", # noqa + "response_time": 10, + "data_quality": [], + "longitude": "-105.20369720459", + "latitude": "40.050701141357", + "country": "United States", + "city": "Boulder", + "region": "CO", + "postal": "80301", + "dma": "751", + "survey_data": { + "2": { + "id": 2, + "type": "RADIO", + "question": "Will you attend the event?", + "section_id": 1, + "original_answer": "Yes", + "answer": "1", + "answer_id": 10001, + "shown": True + }, + "3": { + "id": 3, + "type": "TEXTBOX", + "question": "How many guests will you bring?", + "section_id": 1, + "answer": "3", + "shown": True + }, + "4": { + "id": 4, + "type": "TEXTBOX", + "question": "How many guests are under the age of 18?", + "section_id": 1, + "answer": "2", + "shown": True + } + } }, - "3": { - "id": 3, - "type": "TEXTBOX", - "question": "How many guests will you bring?", - "section_id": 1, - "answer": "3", - "shown": True, - }, - "4": { - "id": 4, - "type": "TEXTBOX", - "question": "How many guests are under the age of 18?", - "section_id": 1, - "answer": "2", - "shown": True, - }, - }, - }, - { - "id": "2", - "contact_id": "", - "status": "Complete", - "is_test_data": "0", - "date_submitted": "2018-09-27 10:43:11 EDT", - "session_id": "1538059381_5bacec751e41f4.51482165", - "language": "English", - "date_started": "2018-09-27 10:43:01 EDT", - "link_id": "7473882", - "url_variables": { - "__dbget": {"key": "__dbget", "value": "True", "type": "url"} - }, - "ip_address": "50.232.185.226", - "referer": "", - "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36", # noqa - "response_time": 10, - "data_quality": [], - "longitude": "-105.20369720459", - "latitude": "40.050701141357", - "country": "United States", - "city": "Boulder", - "region": "CO", - "postal": "80301", - "dma": "751", - "survey_data": { - "2": { - "id": 2, - "type": "RADIO", - "question": "Will you attend the event?", - "section_id": 1, - "original_answer": "1", - "answer": "1", - "answer_id": 10001, - "shown": True, - }, - "3": { - "id": 3, - "type": "TEXTBOX", - "question": "How many guests will you bring?", - "section_id": 1, - "answer": "2", - "shown": True, - }, - "4": { - "id": 4, - "type": "TEXTBOX", - "question": "How many guests are under the age of 18?", - "section_id": 1, - "answer": "0", - "shown": True, - }, - }, - }, - ], - } + { + "id": "2", + "contact_id": "", + "status": "Complete", + "is_test_data": "0", + "date_submitted": "2018-09-27 10:43:11 EDT", + "session_id": "1538059381_5bacec751e41f4.51482165", + "language": "English", + "date_started": "2018-09-27 10:43:01 EDT", + "link_id": "7473882", + "url_variables": { + "__dbget": { + "key": "__dbget", + "value": "True", + "type": "url" + } + }, + "ip_address": "50.232.185.226", + "referer": "", + "user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36", # noqa + 
"response_time": 10, + "data_quality": [], + "longitude": "-105.20369720459", + "latitude": "40.050701141357", + "country": "United States", + "city": "Boulder", + "region": "CO", + "postal": "80301", + "dma": "751", + "survey_data": { + "2": { + "id": 2, + "type": "RADIO", + "question": "Will you attend the event?", + "section_id": 1, + "original_answer": "1", + "answer": "1", + "answer_id": 10001, + "shown": True + }, + "3": { + "id": 3, + "type": "TEXTBOX", + "question": "How many guests will you bring?", + "section_id": 1, + "answer": "2", + "shown": True + }, + "4": { + "id": 4, + "type": "TEXTBOX", + "question": "How many guests are under the age of 18?", + "section_id": 1, + "answer": "0", + "shown": True + } + } + } + ] + } diff --git a/test/test_alchemer/test_getsurveys.py b/test/test_alchemer/test_getsurveys.py index 62a5044111..7dc28b9e3b 100644 --- a/test/test_alchemer/test_getsurveys.py +++ b/test/test_alchemer/test_getsurveys.py @@ -1,7 +1,7 @@ import os import unittest import unittest.mock as mock -from parsons import Alchemer +from parsons.alchemer.alchemer import Alchemer import logging logger = logging.getLogger(__name__) @@ -12,9 +12,9 @@ class TestAlchemErGetSurveys(unittest.TestCase): def setUp(self): - os.environ["ALCHEMER_API_TOKEN"] = "MYFAKEAPITOKEN" - os.environ["ALCHEMER_API_TOKEN_SECRET"] = "MYFAKETOKENSECRET" - os.environ["ALCHEMER_API_VERSION"] = "MYFAKEVERSION" + os.environ['ALCHEMER_API_TOKEN'] = 'MYFAKEAPITOKEN' + os.environ['ALCHEMER_API_TOKEN_SECRET'] = 'MYFAKETOKENSECRET' + os.environ['ALCHEMER_API_VERSION'] = 'MYFAKEVERSION' self.alchemer = Alchemer() self.alchemer._client = mock.MagicMock() @@ -39,41 +39,46 @@ def test_removes_links_field(self): def _get_surveys_return_single_page(self): return { - "result_ok": True, - "total_count": "1461", - "page": 1, - "total_pages": 1, - "results_per_page": 50, - "data": [ - { - "id": "1234567", - "team": "433737", - "type": "Standard Survey", - "status": "Launched", - "created_on": "2017-04-24 10:44:23", - "modified_on": "2017-04-24 10:58:20", - "title": "Survey", - "statistics": {"Partial": 4, "Complete": 2}, - "links": { - "edit": "[Link to Build Tab]", - "publish": "[Link to Share Tab]", - "default": "[Default Share Link]", - }, - }, - { - "id": "1234568", - "team": "433737", - "type": "Standard Survey", - "status": "Launched", - "created_on": "2017-04-24 09:53:01", - "modified_on": "2017-04-24 09:53:55", - "title": "Survey", - "statistics": {"Partial": 1}, - "links": { - "edit": "[Link to Build Tab]", - "publish": "[Link to Share Tab]", - "default": "[Default Share Link]", - }, - }, - ], - } + "result_ok": True, + "total_count": "1461", + "page": 1, + "total_pages": 1, + "results_per_page": 50, + "data": [ + { + "id": "1234567", + "team": "433737", + "type": "Standard Survey", + "status": "Launched", + "created_on": "2017-04-24 10:44:23", + "modified_on": "2017-04-24 10:58:20", + "title": "Survey", + "statistics": { + "Partial": 4, + "Complete": 2 + }, + "links": { + "edit": "[Link to Build Tab]", + "publish": "[Link to Share Tab]", + "default": "[Default Share Link]" + } + }, + { + "id": "1234568", + "team": "433737", + "type": "Standard Survey", + "status": "Launched", + "created_on": "2017-04-24 09:53:01", + "modified_on": "2017-04-24 09:53:55", + "title": "Survey", + "statistics": { + "Partial": 1 + }, + "links": { + "edit": "[Link to Build Tab]", + "publish": "[Link to Share Tab]", + "default": "[Default Share Link]" + } + } + ] + } diff --git a/test/test_auth0.py b/test/test_auth0.py deleted file mode 
100644 index f15627f9c6..0000000000 --- a/test/test_auth0.py +++ /dev/null @@ -1,62 +0,0 @@ -import unittest -import unittest.mock -from test.utils import assert_matching_tables - -import requests_mock -from parsons import Auth0, Table - -CLIENT_ID = "abc" -CLIENT_SECRET = "def" -DOMAIN = "fakedomain.auth0.com" - - -class TestAuth0(unittest.TestCase): - def setUp(self): - self.auth0 = Auth0(CLIENT_ID, CLIENT_SECRET, DOMAIN) - self.fake_upsert_person = { - "email": "fakeemail@fakedomain.com", - "given_name": "Fakey", - "family_name": "McFakerson", - "username": "fakeusername", - "user_id": 3, - } - - @requests_mock.Mocker() - def test_delete_user(self, m): - user_id = 1 - m.delete(f"{self.auth0.base_url}/api/v2/users/{user_id}", status_code=204) - self.assertEqual(self.auth0.delete_user(user_id), 204) - - @requests_mock.Mocker() - def test_get_users_by_email(self, m): - email = "fakeemail@fakedomain.com" - mock_users = [{"email": "fake3mail@fakedomain.com", "id": 2}] - m.get( - f"{self.auth0.base_url}/api/v2/users-by-email?email={email}", - json=mock_users, - ) - assert_matching_tables( - self.auth0.get_users_by_email(email), Table(mock_users), True - ) - - @requests_mock.Mocker() - def test_upsert_user(self, m): - user = self.fake_upsert_person - email = user["email"] - m.get( - f"{self.auth0.base_url}/api/v2/users-by-email?email={email}", - json=[user], - ) - mock_resp = unittest.mock.MagicMock() - mock_resp.status_code = 200 - m.patch(f"{self.auth0.base_url}/api/v2/users/{user['user_id']}", [mock_resp]) - m.post(f"{self.auth0.base_url}/api/v2/users", mock_resp) - ret = self.auth0.upsert_user( - email, - user["username"], - user["given_name"], - user["family_name"], - {}, - {}, - ) - self.assertEqual(ret.status_code, 200) diff --git a/test/test_aws_async.py b/test/test_aws_async.py index 2c005cbe76..ebac611e9f 100644 --- a/test/test_aws_async.py +++ b/test/test_aws_async.py @@ -1,7 +1,7 @@ import unittest from test.utils import assert_matching_tables -from parsons import Table +from parsons.etl.table import Table from parsons.aws.aws_async import import_and_get_task, get_func_task_path from parsons.aws.lambda_distribute import distribute_task @@ -22,6 +22,7 @@ def fake_table_process(table, **fakekwargs): class FakeRunner(object): + def __init__(self, init1=None): self.init1 = init1 @@ -30,98 +31,81 @@ def foobar(self, table, a, x=None, y=None): global tableargs count = count + 1 tableargs = (table, self.init1, a, x, y) - if a == "raise": - raise Exception("foo bar") + if a == 'raise': + raise Exception('foo bar') class TestAsync(unittest.TestCase): + def test_task_path_conversion(self): # fake_func fake_str = get_func_task_path(fake_func) - print("fake_str", fake_str) + print('fake_str', fake_str) fake_renewed = import_and_get_task(fake_str) self.assertEqual(fake_renewed(1, 2, 3), (1, 2, 3, 56)) # Table.from_csv_string @classmethod csv_str = get_func_task_path(Table.from_csv_string, Table) - print("csv_str", csv_str) + print('csv_str', csv_str) csv_renewed = import_and_get_task(csv_str) - assert_matching_tables(csv_renewed("x,y\n1,2"), Table([("x", "y"), ("1", "2")])) + assert_matching_tables(csv_renewed('x,y\n1,2'), + Table([('x', 'y'), (1, 2)])) # Table.to_dicts (instance) dicts_str = get_func_task_path(Table.to_dicts, Table) - print("dicts_str", dicts_str) - dicts_renewed = import_and_get_task(dicts_str, {"lst": [("x", "y"), (1, 2)]}) - self.assertEqual(dicts_renewed(), [{"x": 1, "y": 2}]) + print('dicts_str', dicts_str) + dicts_renewed = import_and_get_task(dicts_str, + {'lst': [('x', 
'y'), (1, 2)]}) + self.assertEqual(dicts_renewed(), + [{'x': 1, 'y': 2}]) def test_distribute_task(self): global count global tableargs datatable = [ - ("x", "y"), - (1, 2), - (3, 4), - (5, 6), - (7, 8), - (9, 0), - (11, 12), - (13, 14), - (15, 16), - (17, 18), - (19, 10), + ('x', 'y'), + (1, 2), (3, 4), (5, 6), (7, 8), (9, 0), + (11, 12), (13, 14), (15, 16), (17, 18), (19, 10), ] count = 0 tableargs = None - distribute_task( - Table(datatable), - fake_table_process, - "foo", # bucket - group_count=5, - storage="local", - func_kwargs={"x": 1, "y": [2, 3]}, - ) + distribute_task(Table(datatable), + fake_table_process, + 'foo', # bucket + group_count=5, + storage='local', + func_kwargs={'x': 1, 'y': [2, 3]}) self.assertEqual(count, 2) assert_matching_tables( tableargs[0], - Table( - [ - ("x", "y"), - ("11", "12"), - ("13", "14"), - ("15", "16"), - ("17", "18"), - ("19", "10"), - ] - ), - ) + Table([('x', 'y'), + (11, 12), (13, 14), (15, 16), (17, 18), (19, 10)])) count = 0 tableargs = None - distribute_task( - Table(datatable + [(0, 0)]), - FakeRunner.foobar, - "foo", # bucket - group_count=5, - storage="local", - func_class=FakeRunner, - func_class_kwargs={"init1": "initx"}, - func_kwargs={"a": 1, "x": 2, "y": 3}, - ) + distribute_task(Table(datatable + [(0, 0)]), + FakeRunner.foobar, + 'foo', # bucket + group_count=5, + storage='local', + func_class=FakeRunner, + func_class_kwargs={'init1': 'initx'}, + func_kwargs={'a': 1, 'x': 2, 'y': 3}) self.assertEqual(count, 3) - self.assertEqual(tableargs[1:], ("initx", 1, 2, 3)) - self.assertEqual(tableargs[1:], ("initx", 1, 2, 3)) - assert_matching_tables(tableargs[0], Table([("x", "y"), ("0", "0")])) + self.assertEqual(tableargs[1:], ('initx', 1, 2, 3)) + self.assertEqual(tableargs[1:], ('initx', 1, 2, 3)) + assert_matching_tables( + tableargs[0], + Table([('x', 'y'), (0, 0)])) # 3. 
catch=True (with throwing) count = 0 tableargs = None - distribute_task( - Table(datatable[:6]), - FakeRunner.foobar, - "foo", # bucket - group_count=5, - storage="local", - func_class=FakeRunner, - func_class_kwargs={"init1": "initx"}, - catch=True, - func_kwargs={"a": "raise", "x": 2, "y": 3}, - ) + distribute_task(Table(datatable[:6]), + FakeRunner.foobar, + 'foo', # bucket + group_count=5, + storage='local', + func_class=FakeRunner, + func_class_kwargs={'init1': 'initx'}, + catch=True, + func_kwargs={'a': 'raise', 'x': 2, 'y': 3}) diff --git a/test/test_azure/test_azure_blob_storage.py b/test/test_azure/test_azure_blob_storage.py index b9ded2678f..8751a6731b 100644 --- a/test/test_azure/test_azure_blob_storage.py +++ b/test/test_azure/test_azure_blob_storage.py @@ -5,18 +5,19 @@ from azure.storage.blob import BlobClient, ContainerClient -from parsons import AzureBlobStorage, Table +from parsons.azure import AzureBlobStorage +from parsons.etl import Table from parsons.utilities import files -TEST_ACCOUNT_NAME = os.getenv("PARSONS_AZURE_ACCOUNT_NAME") -TEST_CREDENTIAL = os.getenv("PARSONS_AZURE_CREDENTIAL") -TEST_CONTAINER_NAME = os.getenv("PARSONS_AZURE_CONTAINER_NAME") -TEST_FILE_NAME = "tmp_file_01.txt" -TEST_FILE_CONTENTS = "Test" +TEST_ACCOUNT_NAME = os.getenv('PARSONS_AZURE_ACCOUNT_NAME') +TEST_CREDENTIAL = os.getenv('PARSONS_AZURE_CREDENTIAL') +TEST_CONTAINER_NAME = os.getenv('PARSONS_AZURE_CONTAINER_NAME') +TEST_FILE_NAME = 'tmp_file_01.txt' +TEST_FILE_CONTENTS = 'Test' -@unittest.skipIf(not os.getenv("LIVE_TEST"), "Skipping because not running live test") +@unittest.skipIf(not os.getenv('LIVE_TEST'), 'Skipping because not running live test') class TestAzureBlobStorage(unittest.TestCase): def setUp(self): @@ -30,7 +31,7 @@ def setUp(self): # Create blob if it doesn't exist already if not self.azure_blob.blob_exists(TEST_CONTAINER_NAME, TEST_FILE_NAME): - tmp_file_path = files.string_to_temp_file(TEST_FILE_CONTENTS, suffix=".txt") + tmp_file_path = files.string_to_temp_file(TEST_FILE_CONTENTS, suffix='.txt') self.azure_blob.put_blob(TEST_CONTAINER_NAME, TEST_FILE_NAME, tmp_file_path) def test_list_containers(self): @@ -45,14 +46,12 @@ def test_container_exists(self): self.assertTrue(self.azure_blob.container_exists(TEST_CONTAINER_NAME)) # Assert that invalid bucket does not exists - self.assertFalse(self.azure_blob.container_exists("fakecontainer")) + self.assertFalse(self.azure_blob.container_exists('fakecontainer')) def test_get_container(self): # Assert that a ContainerClient object is returned - self.assertIsInstance( - self.azure_blob.get_container(TEST_CONTAINER_NAME), ContainerClient - ) + self.assertIsInstance(self.azure_blob.get_container(TEST_CONTAINER_NAME), ContainerClient) def test_create_container(self): @@ -61,14 +60,14 @@ def test_create_container(self): # Add current datetime microseconds for randomness to avoid intermittent failures dt_microseconds = datetime.now().isoformat()[-6:] - create_container_name = f"{TEST_CONTAINER_NAME}create{dt_microseconds}" + create_container_name = f'{TEST_CONTAINER_NAME}create{dt_microseconds}' # Create a new container with metadata, assert that it is included create_container = self.azure_blob.create_container( - create_container_name, metadata={"testing": "parsons"} + create_container_name, metadata={'testing': 'parsons'} ) create_container_properties = create_container.get_container_properties() - self.assertIn("testing", create_container_properties.metadata) + self.assertIn('testing', create_container_properties.metadata) # 
Delete the container after the assertion self.azure_blob.delete_container(create_container_name) @@ -77,7 +76,7 @@ def test_delete_container(self): # Add current datetime microseconds for randomness to avoid intermittent failures dt_microseconds = datetime.now().isoformat()[-6:] - delete_container_name = f"{TEST_CONTAINER_NAME}delete{dt_microseconds}" + delete_container_name = f'{TEST_CONTAINER_NAME}delete{dt_microseconds}' # Create an additional container, assert that it exists self.azure_blob.create_container(delete_container_name) @@ -97,12 +96,10 @@ def test_list_blobs(self): def test_blob_exists(self): # Assert that blob created in setup exists - self.assertTrue( - self.azure_blob.blob_exists(TEST_CONTAINER_NAME, TEST_FILE_NAME) - ) + self.assertTrue(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, TEST_FILE_NAME)) # Assert that invalid blob does not exist - self.assertFalse(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, "FAKE_BLOB")) + self.assertFalse(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, 'FAKE_BLOB')) def test_get_blob(self): @@ -114,18 +111,16 @@ def test_get_blob(self): def test_get_blob_url(self): # Assert that get_blob_url returns a URL with a shared access signature - blob_url = self.azure_blob.get_blob_url( - TEST_CONTAINER_NAME, TEST_FILE_NAME, permission="r" - ) + blob_url = self.azure_blob.get_blob_url(TEST_CONTAINER_NAME, TEST_FILE_NAME, permission='r') parsed_blob_url = urlparse(blob_url) parsed_blob_query = parse_qs(parsed_blob_url.query) - self.assertIn("sas", parsed_blob_query) + self.assertIn('sas', parsed_blob_query) def test_put_blob(self): # Assert that put_blob returns a BlobClient object - put_blob_name = "tmp_file_put.txt" - tmp_file_path = files.string_to_temp_file("Test", suffix=".txt") + put_blob_name = 'tmp_file_put.txt' + tmp_file_path = files.string_to_temp_file('Test', suffix='.txt') put_blob_client = self.azure_blob.put_blob( TEST_CONTAINER_NAME, put_blob_name, tmp_file_path @@ -137,42 +132,34 @@ def test_put_blob(self): def test_download_blob(self): # Download blob and ensure that it has the expected file contents - download_blob_path = self.azure_blob.download_blob( - TEST_CONTAINER_NAME, TEST_FILE_NAME - ) - with open(download_blob_path, "r") as f: + download_blob_path = self.azure_blob.download_blob(TEST_CONTAINER_NAME, TEST_FILE_NAME) + with open(download_blob_path, 'r') as f: self.assertEqual(f.read(), TEST_FILE_CONTENTS) def test_delete_blob(self): - delete_blob_name = "delete_blob.txt" + delete_blob_name = 'delete_blob.txt' # Upload a blob, assert that it exists - tmp_file_path = files.string_to_temp_file(TEST_FILE_CONTENTS, suffix=".txt") + tmp_file_path = files.string_to_temp_file(TEST_FILE_CONTENTS, suffix='.txt') self.azure_blob.put_blob(TEST_CONTAINER_NAME, delete_blob_name, tmp_file_path) - self.assertTrue( - self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name) - ) + self.assertTrue(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name)) # Delete the blob, assert that it no longer exists self.azure_blob.delete_blob(TEST_CONTAINER_NAME, delete_blob_name) - self.assertFalse( - self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name) - ) + self.assertFalse(self.azure_blob.blob_exists(TEST_CONTAINER_NAME, delete_blob_name)) def test_upload_table(self): - test_table = Table([{"first": "Test", "last": "Person"}]) - test_table_blob_name = "table.csv" + test_table = Table([{'first': 'Test', 'last': 'Person'}]) + test_table_blob_name = 'table.csv' # Upload a test table as CSV, assert that blob is a 
CSV table_blob_client = self.azure_blob.upload_table( - test_table, TEST_CONTAINER_NAME, test_table_blob_name, data_type="csv" + test_table, TEST_CONTAINER_NAME, test_table_blob_name, data_type='csv' ) table_blob_client_properties = table_blob_client.get_blob_properties() - self.assertEqual( - table_blob_client_properties.content_settings.content_type, "text/csv" - ) + self.assertEqual(table_blob_client_properties.content_settings.content_type, 'text/csv') # Remove blob after assertion self.azure_blob.delete_blob(TEST_CONTAINER_NAME, test_table_blob_name) diff --git a/test/test_bill_com/test_bill_com.py b/test/test_bill_com/test_bill_com.py index 2014788259..33adb3f592 100644 --- a/test/test_bill_com/test_bill_com.py +++ b/test/test_bill_com/test_bill_com.py @@ -6,200 +6,173 @@ class TestBillCom(unittest.TestCase): + @requests_mock.Mocker() def setUp(self, m): - self.api_url = "http://FAKEURL.com/" - - m.post( - self.api_url + "Login.json", - text=json.dumps({"response_data": {"sessionId": "FAKE"}}), - ) - self.bc = BillCom("FAKE", "FAKE", "FAKE", "FAKE", self.api_url) - - self.fake_time = "2019-02-29T00:00:00.000+0000" - self.fake_date = "2019-02-29" - self.fake_customer_email = "fake_customer_email@fake_customer_email.com" - - self.fake_user_list = { - "response_status": 0, - "response_message": "Success", - "response_data": [ - { - "entity": "User", - "id": "fake_user_id", - "isActive": "1", - "createdTime": self.fake_time, - "updatedTime": self.fake_time, - "loginId": "fake_login_id", - "profileId": "fake_profile_id", - "firstName": "fake_first_name", - "lastName": "fake_last_name", - "email": "fake_email@fake_email.com", - "timezoneId": "7", - "partnerUserGroupType": "2", - } - ], - } - - self.fake_customer_list = { - "response_status": 0, - "response_message": "Success", - "response_data": [ - { - "entity": "Customer", - "id": "fake_customer_id", - "isActive": "1", - "createdTime": self.fake_time, - "updatedTime": self.fake_time, - "name": "fake_customer_name", - "shortName": "fake_shorter_customer_name", - "parentCustomerId": "0", - "companyName": "fake_company_name", - "contactFirstName": "fake_first_name", - "contactLastName": "fake_last_name", - "accNumber": "0", - "billAddress1": "123 Fake Street", - "billAddress2": "Suite 13", - "billAddress3": None, - "billAddress4": None, - "billAddressCity": "Fake City", - "billAddressState": "FS", - "billAddressCountry": "USA", - "billAddressZip": "11111", - "shipAddress1": "123 Fake Street", - "shipAddress2": "Office 13", - "shipAddress3": None, - "shipAddress4": None, - "shipAddressCity": "Fake City", - "shipAddressState": "FS", - "shipAddressCountry": "USA", - "shipAddressZip": "11111", - "email": self.fake_customer_email, - "phone": "123-123-1234", - "altPhone": "123-123-1256", - "fax": "123-123-1235", - "description": "fake_description", - "printAs": None, - "mergedIntoId": "0", - "hasAuthorizedToCharge": False, - "accountType": "1", - } - ], - } - - self.fake_customer_read_json = { - "response_status": 0, - "response_message": "Success", - "response_data": self.fake_customer_list["response_data"][0], - } - - self.fake_invoice_list = { - "response_status": 0, - "response_message": "Success", - "response_data": [ - { - "entity": "Invoice", - "id": "fake_invoice_id", - "isActive": "1", - "createdTime": self.fake_time, - "updatedTime": self.fake_time, - "customerId": "fake_customer_id", - "invoiceNumber": "fake_invoice_number", - "invoiceDate": self.fake_date, - "dueDate": self.fake_date, - "glPostingDate": self.fake_date, - "amount": 
1.0, - "amountDue": 1.0, - "paymentStatus": "1", - "description": None, - "poNumber": None, - "isToBePrinted": False, - "isToBeEmailed": False, - "lastSentTime": None, - "itemSalesTax": "0", - "salesTaxPercentage": 0, - "salesTaxTotal": 0.0, - "terms": None, - "salesRep": None, - "FOB": None, - "shipDate": None, - "shipMethod": None, - "departmentId": "0", - "locationId": "0", - "actgClassId": "0", - "jobId": "0", - "payToBankAccountId": "0", - "payToChartOfAccountId": "0", - "invoiceTemplateId": "0", - "hasAutoPay": False, - "source": "0", - "emailDeliveryOption": "1", - "mailDeliveryOption": "0", - "creditAmount": 0.0, - "invoiceLineItems": [ - { - "entity": "InvoiceLineItem", - "id": "fake_line_item_id", - "createdTime": self.fake_time, - "updatedTime": self.fake_time, - "invoiceId": "fake_invoice_id", - "itemId": "0", - "quantity": 1, - "amount": 1.0, - "price": None, - "serviceDate": None, - "ratePercent": None, - "chartOfAccountId": "0", - "departmentId": "0", - "locationId": "0", - "actgClassId": "0", - "jobId": "0", - "description": None, - "taxable": False, - "taxCode": "Non", - } - ], - } - ], - } - - self.fake_invoice_read_json = { - "response_status": 0, - "response_message": "Success", - "response_data": self.fake_invoice_list["response_data"][0], - } - - self.fake_invoice_line_items = self.fake_invoice_list["response_data"][0][ - "invoiceLineItems" - ] + self.api_url = 'http://FAKEURL.com/' + + m.post(self.api_url + 'Login.json', + text=json.dumps({'response_data': {'sessionId': 'FAKE'}})) + self.bc = BillCom('FAKE', 'FAKE', 'FAKE', 'FAKE', self.api_url) + + self.fake_time = '2019-02-29T00:00:00.000+0000' + self.fake_date = '2019-02-29' + self.fake_customer_email = 'fake_customer_email@fake_customer_email.com' + + self.fake_user_list = {'response_status': 0, + 'response_message': 'Success', + 'response_data': [{'entity': 'User', + 'id': 'fake_user_id', + 'isActive': '1', + 'createdTime': self.fake_time, + 'updatedTime': self.fake_time, + 'loginId': 'fake_login_id', + 'profileId': 'fake_profile_id', + 'firstName': 'fake_first_name', + 'lastName': 'fake_last_name', + 'email': 'fake_email@fake_email.com', + 'timezoneId': '7', + 'partnerUserGroupType': '2'}]} + + self.fake_customer_list = {'response_status': 0, + 'response_message': 'Success', + 'response_data': [{'entity': 'Customer', + 'id': 'fake_customer_id', + 'isActive': '1', + 'createdTime': self.fake_time, + 'updatedTime': self.fake_time, + 'name': 'fake_customer_name', + 'shortName': 'fake_shorter_customer_name', + 'parentCustomerId': '0', + 'companyName': 'fake_company_name', + 'contactFirstName': 'fake_first_name', + 'contactLastName': 'fake_last_name', + 'accNumber': '0', + 'billAddress1': '123 Fake Street', + 'billAddress2': 'Suite 13', + 'billAddress3': None, + 'billAddress4': None, + 'billAddressCity': 'Fake City', + 'billAddressState': 'FS', + 'billAddressCountry': 'USA', + 'billAddressZip': '11111', + 'shipAddress1': '123 Fake Street', + 'shipAddress2': 'Office 13', + 'shipAddress3': None, + 'shipAddress4': None, + 'shipAddressCity': 'Fake City', + 'shipAddressState': 'FS', + 'shipAddressCountry': 'USA', + 'shipAddressZip': '11111', + 'email': self.fake_customer_email, + 'phone': '123-123-1234', + 'altPhone': '123-123-1256', + 'fax': '123-123-1235', + 'description': 'fake_description', + 'printAs': None, + 'mergedIntoId': '0', + 'hasAuthorizedToCharge': False, + 'accountType': '1'}]} + + self.fake_customer_read_json = {'response_status': 0, + 'response_message': 'Success', + 'response_data': + 
self.fake_customer_list['response_data'][0]} + + self.fake_invoice_list = {'response_status': 0, + 'response_message': 'Success', + 'response_data': [{'entity': 'Invoice', + 'id': 'fake_invoice_id', + 'isActive': '1', + 'createdTime': self.fake_time, + 'updatedTime': self.fake_time, + 'customerId': 'fake_customer_id', + 'invoiceNumber': 'fake_invoice_number', + 'invoiceDate': self.fake_date, + 'dueDate': self.fake_date, + 'glPostingDate': self.fake_date, + 'amount': 1.0, + 'amountDue': 1.0, + 'paymentStatus': '1', + 'description': None, + 'poNumber': None, + 'isToBePrinted': False, + 'isToBeEmailed': False, + 'lastSentTime': None, + 'itemSalesTax': '0', + 'salesTaxPercentage': 0, + 'salesTaxTotal': 0.0, + 'terms': None, + 'salesRep': None, + 'FOB': None, + 'shipDate': None, + 'shipMethod': None, + 'departmentId': '0', + 'locationId': '0', + 'actgClassId': '0', + 'jobId': '0', + 'payToBankAccountId': '0', + 'payToChartOfAccountId': '0', + 'invoiceTemplateId': '0', + 'hasAutoPay': False, + 'source': '0', + 'emailDeliveryOption': '1', + 'mailDeliveryOption': '0', + 'creditAmount': 0.0, + 'invoiceLineItems': [{'entity': + 'InvoiceLineItem', + 'id': + 'fake_line_item_id', + 'createdTime': + self.fake_time, + 'updatedTime': + self.fake_time, + 'invoiceId': + 'fake_invoice_id', + 'itemId': '0', + 'quantity': 1, + 'amount': 1.0, + 'price': None, + 'serviceDate': None, + 'ratePercent': None, + 'chartOfAccountId': '0', + 'departmentId': '0', + 'locationId': '0', + 'actgClassId': '0', + 'jobId': '0', + 'description': None, + 'taxable': False, + 'taxCode': 'Non'}]}]} + + self.fake_invoice_read_json = \ + {'response_status': 0, + 'response_message': 'Success', + 'response_data': + self.fake_invoice_list['response_data'][0]} + + self.fake_invoice_line_items = \ + self.fake_invoice_list['response_data'][0]['invoiceLineItems'] def test_init(self): - self.assertEqual(self.bc.session_id, "FAKE") + self.assertEqual(self.bc.session_id, 'FAKE') def test_get_payload(self): - fake_json = {"fake_key": "fake_data"} + fake_json = {'fake_key': 'fake_data'} payload = self.bc._get_payload(fake_json) - self.assertEqual( - payload, - { - "devKey": self.bc.dev_key, - "sessionId": self.bc.session_id, - "data": json.dumps(fake_json), - }, - ) + self.assertEqual(payload, {'devKey': self.bc.dev_key, + 'sessionId': self.bc.session_id, + 'data': json.dumps(fake_json)}) @requests_mock.Mocker() def test_post_request(self, m): - data = {"id": "fake_customer_id"} - m.post( - self.api_url + "Crud/Read/Customer.json", - text=json.dumps(self.fake_customer_read_json), - ) - self.assertEqual( - self.bc._post_request(data, "Read", "Customer"), - self.fake_customer_read_json, - ) + data = { + 'id': 'fake_customer_id' + } + m.post(self.api_url + 'Crud/Read/Customer.json', + text=json.dumps(self.fake_customer_read_json)) + self.assertEqual(self.bc._post_request(data, 'Read', 'Customer'), + self.fake_customer_read_json) def paginate_callback(self, request, context): # Internal method for simulating pagination @@ -207,7 +180,7 @@ def paginate_callback(self, request, context): remainder = [ {"dict": 2, "col": "C"}, {"dict": 3, "col": "D"}, - {"dict": 4, "col": "E"}, + {"dict": 4, "col": "E"} ] return {"response_data": remainder} @@ -215,161 +188,118 @@ def paginate_callback(self, request, context): @requests_mock.Mocker() def test_paginate_list(self, m): - r = [{"dict": 0, "col": "A"}, {"dict": 1, "col": "B"}] + r = [ + {"dict": 0, "col": "A"}, + {"dict": 1, "col": "B"} + ] overflow = [ {"dict": 2, "col": "C"}, {"dict": 3, "col": "D"}, - 
{"dict": 4, "col": "E"}, + {"dict": 4, "col": "E"} ] r_table = Table() r_table.concat(Table(r)) r_table.concat(Table(overflow)) - data = {"start": 0, "max": 2} + data = { + 'start': 0, + 'max': 2 + } object_name = "Listme" - m.post(self.api_url + f"List/{object_name}.json", json=self.paginate_callback) + m.post(self.api_url + f'List/{object_name}.json', json=self.paginate_callback) assert_matching_tables(self.bc._paginate_list(r, data, object_name), r_table) @requests_mock.Mocker() def test_get_request_response(self, m): - data = {"id": "fake_customer_id"} - m.post( - self.api_url + "Crud/Read/Customer.json", - text=json.dumps(self.fake_customer_read_json), - ) - self.assertEqual( - self.bc._get_request_response(data, "Read", "Customer", "response_data"), - self.fake_customer_read_json["response_data"], - ) + data = { + 'id': 'fake_customer_id' + } + m.post(self.api_url + 'Crud/Read/Customer.json', + text=json.dumps(self.fake_customer_read_json)) + self.assertEqual(self.bc._get_request_response(data, 'Read', 'Customer', 'response_data'), + self.fake_customer_read_json['response_data']) @requests_mock.Mocker() def test_get_user_list(self, m): - m.post(self.api_url + "List/User.json", text=json.dumps(self.fake_user_list)) - assert_matching_tables( - self.bc.get_user_list(), Table(self.fake_user_list["response_data"]) - ) + m.post(self.api_url + 'List/User.json', + text=json.dumps(self.fake_user_list)) + assert_matching_tables(self.bc.get_user_list(), + Table(self.fake_user_list['response_data'])) @requests_mock.Mocker() def test_get_customer_list(self, m): - m.post( - self.api_url + "List/Customer.json", - text=json.dumps(self.fake_customer_list), - ) - assert_matching_tables( - self.bc.get_customer_list(), Table(self.fake_customer_list["response_data"]) - ) + m.post(self.api_url + 'List/Customer.json', + text=json.dumps(self.fake_customer_list)) + assert_matching_tables(self.bc.get_customer_list(), + Table(self.fake_customer_list['response_data'])) @requests_mock.Mocker() def test_get_invoice_list(self, m): - m.post( - self.api_url + "List/Invoice.json", text=json.dumps(self.fake_invoice_list) - ) - assert_matching_tables( - self.bc.get_invoice_list(), Table(self.fake_invoice_list["response_data"]) - ) + m.post(self.api_url + 'List/Invoice.json', + text=json.dumps(self.fake_invoice_list)) + assert_matching_tables(self.bc.get_invoice_list(), + Table(self.fake_invoice_list['response_data'])) @requests_mock.Mocker() def test_read_customer(self, m): - m.post( - self.api_url + "Crud/Read/Customer.json", - text=json.dumps(self.fake_customer_read_json), - ) - self.assertEqual( - self.bc.read_customer("fake_customer_id"), - self.fake_customer_read_json["response_data"], - ) + m.post(self.api_url + 'Crud/Read/Customer.json', + text=json.dumps(self.fake_customer_read_json)) + self.assertEqual(self.bc.read_customer('fake_customer_id'), + self.fake_customer_read_json['response_data']) @requests_mock.Mocker() def test_read_invoice(self, m): - m.post( - self.api_url + "Crud/Read/Invoice.json", - text=json.dumps(self.fake_invoice_read_json), - ) - self.assertEqual( - self.bc.read_invoice("fake_invoice_id"), - self.fake_invoice_read_json["response_data"], - ) + m.post(self.api_url + 'Crud/Read/Invoice.json', + text=json.dumps(self.fake_invoice_read_json)) + self.assertEqual(self.bc.read_invoice('fake_invoice_id'), + self.fake_invoice_read_json['response_data']) def test_check_customer(self): - self.assertTrue( - self.bc.check_customer( - {"id": "fake_customer_id"}, {"id": "fake_customer_id"} - ) - ) - 
self.assertTrue( - self.bc.check_customer( - {"email": "fake_email@fake_email.com"}, - {"id": "fake_customer_id", "email": "fake_email@fake_email.com"}, - ) - ) - self.assertFalse( - self.bc.check_customer( - {"id": "fake_customer_id1"}, {"id": "fake_customer_id2"} - ) - ) - self.assertFalse( - self.bc.check_customer( - {"email": "fake_email1@fake_email.com"}, - {"id": "fake_customer_id2", "email": "fake_email2@fake_email.com"}, - ) - ) + self.assertTrue(self.bc.check_customer({'id': 'fake_customer_id'}, + {'id': 'fake_customer_id'})) + self.assertTrue(self.bc.check_customer({'email': 'fake_email@fake_email.com'}, + {'id': 'fake_customer_id', + 'email': 'fake_email@fake_email.com'})) + self.assertFalse(self.bc.check_customer({'id': 'fake_customer_id1'}, + {'id': 'fake_customer_id2'})) + self.assertFalse(self.bc.check_customer({'email': 'fake_email1@fake_email.com'}, + {'id': 'fake_customer_id2', + 'email': 'fake_email2@fake_email.com'})) @requests_mock.Mocker() def test_get_or_create_customer(self, m): - m.post( - self.api_url + "List/Customer.json", - text=json.dumps(self.fake_customer_list), - ) - m.post( - self.api_url + "Crud/Create/Customer.json", - text=json.dumps(self.fake_customer_read_json), - ) - self.assertEqual( - self.bc.get_or_create_customer( - "fake_customer_name", self.fake_customer_email - ), - self.fake_customer_read_json["response_data"], - ) + m.post(self.api_url + 'List/Customer.json', + text=json.dumps(self.fake_customer_list)) + m.post(self.api_url + 'Crud/Create/Customer.json', + text=json.dumps(self.fake_customer_read_json)) + self.assertEqual(self.bc.get_or_create_customer('fake_customer_name', + self.fake_customer_email), + self.fake_customer_read_json['response_data']) @requests_mock.Mocker() def test_create_invoice(self, m): - m.post( - self.api_url + "Crud/Create/Invoice.json", - text=json.dumps(self.fake_invoice_read_json), - ) - self.assertEqual( - self.bc.create_invoice( - "fake_customer_id", - "1", - self.fake_date, - self.fake_date, - self.fake_invoice_line_items, - ), - self.fake_invoice_read_json["response_data"], - ) + m.post(self.api_url + 'Crud/Create/Invoice.json', + text=json.dumps(self.fake_invoice_read_json)) + self.assertEqual(self.bc.create_invoice('fake_customer_id', + '1', + self.fake_date, + self.fake_date, + self.fake_invoice_line_items), + self.fake_invoice_read_json['response_data']) @requests_mock.Mocker() def test_send_invoice(self, m): - send_invoice_response_json = { - "response_status": 0, - "response_message": "Success", - "response_data": {}, - } - m.post( - self.api_url + "SendInvoice.json", - text=json.dumps(send_invoice_response_json), - ) - self.assertEqual( - self.bc.send_invoice( - "fake_invoice_id", - "fake_user_id", - "fake_user_email@fake_email.com", - "fake_subject", - "fake_message_body", - ), - {}, - ) + send_invoice_response_json = {'response_status': 0, + 'response_message': 'Success', + 'response_data': {}} + m.post(self.api_url + 'SendInvoice.json', + text=json.dumps(send_invoice_response_json)) + self.assertEqual(self.bc.send_invoice('fake_invoice_id', + 'fake_user_id', + 'fake_user_email@fake_email.com', + 'fake_subject', + 'fake_message_body'), {}) diff --git a/test/test_bloomerang/test_bloomerang.py b/test/test_bloomerang/test_bloomerang.py index c4bb2a343d..b71d7fb8a9 100644 --- a/test/test_bloomerang/test_bloomerang.py +++ b/test/test_bloomerang/test_bloomerang.py @@ -3,176 +3,137 @@ import requests_mock from unittest import mock from test.utils import assert_matching_tables -from parsons import Bloomerang, 
Table - -from test.test_bloomerang.test_data import ( - ENV_PARAMETERS, - ID, - TEST_DELETE, - TEST_CREATE_CONSTITUENT, - TEST_GET_CONSTITUENT, - TEST_GET_CONSTITUENTS, - TEST_CREATE_TRANSACTION, - TEST_GET_TRANSACTION, - TEST_GET_TRANSACTIONS, - TEST_CREATE_INTERACTION, - TEST_GET_INTERACTION, - TEST_GET_INTERACTIONS, -) +from parsons.bloomerang.bloomerang import Bloomerang +from parsons.etl import Table + +from test.test_bloomerang.test_data import ENV_PARAMETERS, ID, TEST_DELETE, \ + TEST_CREATE_CONSTITUENT, TEST_GET_CONSTITUENT, TEST_GET_CONSTITUENTS, \ + TEST_CREATE_TRANSACTION, TEST_GET_TRANSACTION, TEST_GET_TRANSACTIONS, \ + TEST_CREATE_INTERACTION, TEST_GET_INTERACTION, TEST_GET_INTERACTIONS class TestBloomerang(unittest.TestCase): + def setUp(self): - self.bloomerang = Bloomerang(api_key="test_key") + self.bloomerang = Bloomerang(api_key='test_key') @mock.patch.dict(os.environ, ENV_PARAMETERS) def test_init_env(self): bloomerang = Bloomerang() - self.assertEqual(bloomerang.api_key, "env_api_key") - self.assertEqual(bloomerang.client_id, "env_client_id") - self.assertEqual(bloomerang.client_secret, "env_client_secret") + self.assertEqual(bloomerang.api_key, 'env_api_key') + self.assertEqual(bloomerang.client_id, 'env_client_id') + self.assertEqual(bloomerang.client_secret, 'env_client_secret') @requests_mock.Mocker() def test_authentication(self, m): # API key - bloomerang = Bloomerang(api_key="my_key") - self.assertEqual(bloomerang.conn.headers["X-API-KEY"], "my_key") + bloomerang = Bloomerang(api_key='my_key') + self.assertEqual(bloomerang.conn.headers['X-API-KEY'], 'my_key') # OAuth2 - m.post(url=bloomerang.uri_auth, json={"code": "my_auth_code"}) - m.post( - url=bloomerang.uri + "oauth/token", json={"access_token": "my_access_token"} - ) - bloomerang = Bloomerang(client_id="my_id", client_secret="my_secret") - self.assertEqual(bloomerang.authorization_code, "my_auth_code") - self.assertEqual(bloomerang.access_token, "my_access_token") - self.assertEqual( - bloomerang.conn.headers["Authorization"], "Bearer my_access_token" - ) + m.post(url=bloomerang.uri_auth, json={'code': 'my_auth_code'}) + m.post(url=bloomerang.uri + 'oauth/token', json={'access_token': 'my_access_token'}) + bloomerang = Bloomerang(client_id='my_id', client_secret='my_secret') + self.assertEqual(bloomerang.authorization_code, 'my_auth_code') + self.assertEqual(bloomerang.access_token, 'my_access_token') + self.assertEqual(bloomerang.conn.headers['Authorization'], 'Bearer my_access_token') def test_base_endpoint(self): - url = self.bloomerang._base_endpoint("constituent") - self.assertEqual(url, "https://api.bloomerang.co/v2/constituent/") + url = self.bloomerang._base_endpoint('constituent') + self.assertEqual(url, 'https://api.bloomerang.co/v2/constituent/') - url = self.bloomerang._base_endpoint("constituent", 1234) - self.assertEqual(url, "https://api.bloomerang.co/v2/constituent/1234/") + url = self.bloomerang._base_endpoint('constituent', 1234) + self.assertEqual(url, 'https://api.bloomerang.co/v2/constituent/1234/') - url = self.bloomerang._base_endpoint("constituent", "1234") - self.assertEqual(url, "https://api.bloomerang.co/v2/constituent/1234/") + url = self.bloomerang._base_endpoint('constituent', '1234') + self.assertEqual(url, 'https://api.bloomerang.co/v2/constituent/1234/') @requests_mock.Mocker() def test_create_constituent(self, m): - m.post(f"{self.bloomerang.uri}constituent/", json=TEST_CREATE_CONSTITUENT) + m.post(f'{self.bloomerang.uri}constituent/', json=TEST_CREATE_CONSTITUENT) 
self.assertEqual(self.bloomerang.create_constituent(), TEST_CREATE_CONSTITUENT) @requests_mock.Mocker() def test_update_constituent(self, m): - m.put(f"{self.bloomerang.uri}constituent/{ID}/", json=TEST_CREATE_CONSTITUENT) - self.assertEqual( - self.bloomerang.update_constituent(ID), TEST_CREATE_CONSTITUENT - ) + m.put(f'{self.bloomerang.uri}constituent/{ID}/', json=TEST_CREATE_CONSTITUENT) + self.assertEqual(self.bloomerang.update_constituent(ID), TEST_CREATE_CONSTITUENT) @requests_mock.Mocker() def test_get_constituent(self, m): - m.get(f"{self.bloomerang.uri}constituent/{ID}/", json=TEST_GET_CONSTITUENT) + m.get(f'{self.bloomerang.uri}constituent/{ID}/', json=TEST_GET_CONSTITUENT) self.assertEqual(self.bloomerang.get_constituent(ID), TEST_GET_CONSTITUENT) @requests_mock.Mocker() def test_delete_constituent(self, m): - m.delete(f"{self.bloomerang.uri}constituent/{ID}/", json=TEST_DELETE) + m.delete(f'{self.bloomerang.uri}constituent/{ID}/', json=TEST_DELETE) self.assertEqual(self.bloomerang.delete_constituent(ID), TEST_DELETE) @requests_mock.Mocker() def test_get_constituents(self, m): - m.get( - f"{self.bloomerang.uri}constituents/?skip=0&take=50", - json=TEST_GET_CONSTITUENTS, - ) - assert_matching_tables( - self.bloomerang.get_constituents(), Table(TEST_GET_CONSTITUENTS["Results"]) - ) + m.get(f'{self.bloomerang.uri}constituents/?skip=0&take=50', json=TEST_GET_CONSTITUENTS) + assert_matching_tables(self.bloomerang.get_constituents(), + Table(TEST_GET_CONSTITUENTS['Results'])) @requests_mock.Mocker() def test_create_transaction(self, m): - m.post(f"{self.bloomerang.uri}transaction/", json=TEST_CREATE_TRANSACTION) + m.post(f'{self.bloomerang.uri}transaction/', json=TEST_CREATE_TRANSACTION) self.assertEqual(self.bloomerang.create_transaction(), TEST_CREATE_TRANSACTION) @requests_mock.Mocker() def test_update_transaction(self, m): - m.put(f"{self.bloomerang.uri}transaction/{ID}/", json=TEST_CREATE_TRANSACTION) - self.assertEqual( - self.bloomerang.update_transaction(ID), TEST_CREATE_TRANSACTION - ) + m.put(f'{self.bloomerang.uri}transaction/{ID}/', json=TEST_CREATE_TRANSACTION) + self.assertEqual(self.bloomerang.update_transaction(ID), TEST_CREATE_TRANSACTION) @requests_mock.Mocker() def test_get_transaction(self, m): - m.get(f"{self.bloomerang.uri}transaction/{ID}/", json=TEST_GET_TRANSACTION) + m.get(f'{self.bloomerang.uri}transaction/{ID}/', json=TEST_GET_TRANSACTION) self.assertEqual(self.bloomerang.get_transaction(ID), TEST_GET_TRANSACTION) @requests_mock.Mocker() def test_delete_transaction(self, m): - m.delete(f"{self.bloomerang.uri}transaction/{ID}/", json=TEST_DELETE) + m.delete(f'{self.bloomerang.uri}transaction/{ID}/', json=TEST_DELETE) self.assertEqual(self.bloomerang.delete_transaction(ID), TEST_DELETE) @requests_mock.Mocker() def test_get_transactions(self, m): - m.get( - f"{self.bloomerang.uri}transactions/?skip=0&take=50", - json=TEST_GET_TRANSACTIONS, - ) - assert_matching_tables( - self.bloomerang.get_transactions(), Table(TEST_GET_TRANSACTIONS["Results"]) - ) + m.get(f'{self.bloomerang.uri}transactions/?skip=0&take=50', json=TEST_GET_TRANSACTIONS) + assert_matching_tables(self.bloomerang.get_transactions(), + Table(TEST_GET_TRANSACTIONS['Results'])) @requests_mock.Mocker() def test_get_transaction_designation(self, m): - m.get( - f"{self.bloomerang.uri}transaction/designation/{ID}/", - json=TEST_GET_TRANSACTION, - ) - self.assertEqual( - self.bloomerang.get_transaction_designation(ID), TEST_GET_TRANSACTION - ) + 
m.get(f'{self.bloomerang.uri}transaction/designation/{ID}/', json=TEST_GET_TRANSACTION) + self.assertEqual(self.bloomerang.get_transaction_designation(ID), TEST_GET_TRANSACTION) @requests_mock.Mocker() def test_get_transaction_designations(self, m): - m.get( - f"{self.bloomerang.uri}transactions/designations/?skip=0&take=50", - json=TEST_GET_TRANSACTIONS, - ) - assert_matching_tables( - self.bloomerang.get_transaction_designations(), - Table(TEST_GET_TRANSACTIONS["Results"]), - ) + m.get(f'{self.bloomerang.uri}transactions/designations/?skip=0&take=50', + json=TEST_GET_TRANSACTIONS) + assert_matching_tables(self.bloomerang.get_transaction_designations(), + Table(TEST_GET_TRANSACTIONS['Results'])) @requests_mock.Mocker() def test_create_interaction(self, m): - m.post(f"{self.bloomerang.uri}interaction/", json=TEST_CREATE_INTERACTION) + m.post(f'{self.bloomerang.uri}interaction/', json=TEST_CREATE_INTERACTION) self.assertEqual(self.bloomerang.create_interaction(), TEST_CREATE_INTERACTION) @requests_mock.Mocker() def test_update_interaction(self, m): - m.put(f"{self.bloomerang.uri}interaction/{ID}/", json=TEST_CREATE_INTERACTION) - self.assertEqual( - self.bloomerang.update_interaction(ID), TEST_CREATE_INTERACTION - ) + m.put(f'{self.bloomerang.uri}interaction/{ID}/', json=TEST_CREATE_INTERACTION) + self.assertEqual(self.bloomerang.update_interaction(ID), TEST_CREATE_INTERACTION) @requests_mock.Mocker() def test_get_interaction(self, m): - m.get(f"{self.bloomerang.uri}interaction/{ID}/", json=TEST_GET_INTERACTION) + m.get(f'{self.bloomerang.uri}interaction/{ID}/', json=TEST_GET_INTERACTION) self.assertEqual(self.bloomerang.get_interaction(ID), TEST_GET_INTERACTION) @requests_mock.Mocker() def test_delete_interaction(self, m): - m.delete(f"{self.bloomerang.uri}interaction/{ID}/", json=TEST_DELETE) + m.delete(f'{self.bloomerang.uri}interaction/{ID}/', json=TEST_DELETE) self.assertEqual(self.bloomerang.delete_interaction(ID), TEST_DELETE) @requests_mock.Mocker() def test_get_interactions(self, m): - m.get( - f"{self.bloomerang.uri}interactions/?skip=0&take=50", - json=TEST_GET_INTERACTIONS, - ) - assert_matching_tables( - self.bloomerang.get_interactions(), Table(TEST_GET_INTERACTIONS["Results"]) - ) + m.get(f'{self.bloomerang.uri}interactions/?skip=0&take=50', json=TEST_GET_INTERACTIONS) + assert_matching_tables(self.bloomerang.get_interactions(), + Table(TEST_GET_INTERACTIONS['Results'])) diff --git a/test/test_bloomerang/test_data.py b/test/test_bloomerang/test_data.py index 93270a7c6f..89e61a4292 100644 --- a/test/test_bloomerang/test_data.py +++ b/test/test_bloomerang/test_data.py @@ -1,12 +1,16 @@ ENV_PARAMETERS = { - "BLOOMERANG_API_KEY": "env_api_key", - "BLOOMERANG_CLIENT_ID": "env_client_id", - "BLOOMERANG_CLIENT_SECRET": "env_client_secret", + 'BLOOMERANG_API_KEY': 'env_api_key', + 'BLOOMERANG_CLIENT_ID': 'env_client_id', + 'BLOOMERANG_CLIENT_SECRET': 'env_client_secret' } ID = 123 -TEST_DELETE = {"Id": 0, "Type": "string", "Deleted": "true"} +TEST_DELETE = { + "Id": 0, + "Type": "string", + "Deleted": 'true' +} TEST_CREATE_CONSTITUENT = { "Type": "Individual", @@ -35,8 +39,8 @@ "AccountId": 0, "Type": "Home", "Value": "user@example.com", - "IsPrimary": "true", - "IsBad": "true", + "IsPrimary": 'true', + "IsBad": 'true' }, "PrimaryPhone": { "Id": 0, @@ -44,7 +48,7 @@ "Type": "Home", "Extension": "string", "Number": "string", - "IsPrimary": "true", + "IsPrimary": 'true' }, "PrimaryAddress": { "Id": 0, @@ -55,17 +59,17 @@ "State": "string", "PostalCode": "string", "Country": "string", - 
"IsPrimary": "true", - "IsBad": "true", - }, + "IsPrimary": 'true', + "IsBad": 'true' + } } TEST_GET_CONSTITUENT = { "Id": 0, "AccountNumber": 0, - "IsInHousehold": "true", - "IsHeadOfHousehold": "true", - "IsFavorite": "true", + "IsInHousehold": 'true', + "IsHeadOfHousehold": 'true', + "IsFavorite": 'true', "FullCustomProfileImageId": 0, "FullCustomProfileImageUrl": "string", "CroppedCustomProfileImageId": 0, @@ -96,8 +100,8 @@ "AccountId": 0, "Type": "Home", "Value": "user@example.com", - "IsPrimary": "true", - "IsBad": "true", + "IsPrimary": 'true', + "IsBad": 'true' }, "PrimaryPhone": { "Id": 0, @@ -105,14 +109,21 @@ "Type": "Home", "Extension": "string", "Number": "string", - "IsPrimary": "true", + "IsPrimary": 'true' }, "HouseholdId": 0, "PreferredCommunicationChannel": "Email", - "CommunicationRestrictions": ["DoNotCall"], + "CommunicationRestrictions": [ + "DoNotCall" + ], "CommunicationRestrictionsUpdateReason": "string", "EmailInterestType": "All", - "CustomEmailInterests": [{"Id": 0, "Name": "string"}], + "CustomEmailInterests": [ + { + "Id": 0, + "Name": "string" + } + ], "EmailInterestsUpdateReason": "string", "PrimaryAddress": { "Id": 0, @@ -123,10 +134,10 @@ "State": "string", "PostalCode": "string", "Country": "string", - "IsPrimary": "true", - "IsBad": "true", + "IsPrimary": 'true', + "IsBad": 'true', "StateAbbreviation": "string", - "CountryCode": "string", + "CountryCode": "string" }, "EngagementScore": "Low", "DonorSearchInfo": { @@ -140,23 +151,43 @@ "LargestGiftMax": 0, "WealthAskMin": 0, "WealthAskMax": 0, - "BusinessExecutive": "true", + "BusinessExecutive": 'true', "NamesScreened": "string", - "DateTimeScreenedUtc": "string", + "DateTimeScreenedUtc": "string" }, - "AddressIds": [0], - "EmailIds": [0], - "PhoneIds": [0], + "AddressIds": [ + 0 + ], + "EmailIds": [ + 0 + ], + "PhoneIds": [ + 0 + ], "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T16:06:59.945Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T16:06:59.945Z", - "LastModifiedName": "string", - }, + "LastModifiedName": "string" + } } TEST_GET_CONSTITUENTS = { @@ -168,9 +199,9 @@ { "Id": 0, "AccountNumber": 0, - "IsInHousehold": "true", - "IsHeadOfHousehold": "true", - "IsFavorite": "true", + "IsInHousehold": 'true', + "IsHeadOfHousehold": 'true', + "IsFavorite": 'true', "FullCustomProfileImageId": 0, "FullCustomProfileImageUrl": "string", "CroppedCustomProfileImageId": 0, @@ -201,8 +232,8 @@ "AccountId": 0, "Type": "Home", "Value": "user@example.com", - "IsPrimary": "true", - "IsBad": "true", + "IsPrimary": 'true', + "IsBad": 'true' }, "PrimaryPhone": { "Id": 0, @@ -210,14 +241,21 @@ "Type": "Home", "Extension": "string", "Number": "string", - "IsPrimary": "true", + "IsPrimary": 'true' }, "HouseholdId": 0, "PreferredCommunicationChannel": "Email", - "CommunicationRestrictions": ["DoNotCall"], + "CommunicationRestrictions": [ + "DoNotCall" + ], "CommunicationRestrictionsUpdateReason": "string", "EmailInterestType": "All", - "CustomEmailInterests": [{"Id": 0, "Name": "string"}], + "CustomEmailInterests": [ + { + "Id": 0, + "Name": "string" + } + ], "EmailInterestsUpdateReason": "string", "EngagementScore": "Low", "DonorSearchInfo": { @@ -231,25 +269,45 @@ "LargestGiftMax": 0, "WealthAskMin": 0, "WealthAskMax": 0, - 
"BusinessExecutive": "true", + "BusinessExecutive": 'true', "NamesScreened": "string", - "DateTimeScreenedUtc": "string", + "DateTimeScreenedUtc": "string" }, - "AddressIds": [0], - "EmailIds": [0], - "PhoneIds": [0], + "AddressIds": [ + 0 + ], + "EmailIds": [ + 0 + ], + "PhoneIds": [ + 0 + ], "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-05T01:40:43.035Z", "CreatedName": "string", "LastModifiedDate": "2020-09-05T01:40:43.035Z", - "LastModifiedName": "string", - }, + "LastModifiedName": "string" + } } - ], + ] } TEST_CREATE_TRANSACTION = { @@ -275,8 +333,10 @@ { "Amount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], "Type": "Donation", "NonDeductibleAmount": 0, "FundId": 0, @@ -284,34 +344,68 @@ "CampaignId": 0, "AppealId": 0, "TributeId": 0, - "SoftCredits": ["null"], + "SoftCredits": [ + 'null' + ], "CustomValues": [ - {"FieldId": 0, "Value": "string"}, - {"FieldId": 0, "ValueId": 0}, - {"FieldId": 0, "ValueIds": [0]}, + { + "FieldId": 0, + "Value": "string" + }, + { + "FieldId": 0, + "ValueId": 0 + }, + { + "FieldId": 0, + "ValueIds": [ + 0 + ] + } ], - "Attachments": ["null", "null"], + "Attachments": [ + 'null', + 'null' + ] }, { "Amount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], "Type": "PledgePayment", "PledgeId": 0, "CustomValues": [ - {"FieldId": 0, "Value": "string"}, - {"FieldId": 0, "ValueId": 0}, - {"FieldId": 0, "ValueIds": [0]}, + { + "FieldId": 0, + "Value": "string" + }, + { + "FieldId": 0, + "ValueId": 0 + }, + { + "FieldId": 0, + "ValueIds": [ + 0 + ] + } ], - "Attachments": ["null", "null"], + "Attachments": [ + 'null', + 'null' + ] }, { "RecurringDonationEndDate": "2020-09-08", "Amount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], "RecurringDonationFrequency": "Weekly", "RecurringDonationDay1": 0, "RecurringDonationDay2": 0, @@ -322,38 +416,80 @@ "CampaignId": 0, "AppealId": 0, "TributeId": 0, - "SoftCredits": ["null"], + "SoftCredits": [ + 'null' + ], "CustomValues": [ - {"FieldId": 0, "Value": "string"}, - {"FieldId": 0, "ValueId": 0}, - {"FieldId": 0, "ValueIds": [0]}, + { + "FieldId": 0, + "Value": "string" + }, + { + "FieldId": 0, + "ValueId": 0 + }, + { + "FieldId": 0, + "ValueIds": [ + 0 + ] + } ], - "Attachments": ["null", "null"], + "Attachments": [ + 'null', + 'null' + ] }, { "Amount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], "Type": "RecurringDonationPayment", "RecurringDonationId": 0, "FundId": 0, "QuickbooksAccountId": 0, "CampaignId": 0, "AppealId": 0, - "IsExtraPayment": "true", + "IsExtraPayment": 'true', "CustomValues": [ - {"FieldId": 0, "Value": "string"}, - {"FieldId": 0, "ValueId": 0}, - {"FieldId": 0, "ValueIds": [0]}, + { + "FieldId": 0, + "Value": "string" + }, + { + "FieldId": 0, + "ValueId": 0 + }, + { + "FieldId": 0, + "ValueIds": [ + 
0 + ] + } ], - "Attachments": ["null", "null"], - }, + "Attachments": [ + 'null', + 'null' + ] + } ], "Attachments": [ - {"Guid": "string", "Name": "string", "Extension": "string", "Url": "string"}, - {"Id": 0, "Name": "string", "Extension": "string", "Url": "string"}, - ], + { + "Guid": "string", + "Name": "string", + "Extension": "string", + "Url": "string" + }, + { + "Id": 0, + "Name": "string", + "Extension": "string", + "Url": "string" + } + ] } TEST_GET_TRANSACTION = { @@ -385,27 +521,62 @@ "Amount": 0, "NonDeductibleAmount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], - "Fund": {"Id": 0, "Name": "string"}, - "QuickbooksAccount": {"Id": 0, "Name": "string"}, - "Campaign": {"Id": 0, "Name": "string"}, - "Appeal": {"Id": 0, "Name": "string"}, - "Tribute": {"Id": 0, "Name": "string"}, + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], + "Fund": { + "Id": 0, + "Name": "string" + }, + "QuickbooksAccount": { + "Id": 0, + "Name": "string" + }, + "Campaign": { + "Id": 0, + "Name": "string" + }, + "Appeal": { + "Id": 0, + "Name": "string" + }, + "Tribute": { + "Id": 0, + "Name": "string" + }, "TributeType": "InHonorOf", - "SoftCreditIds": [0], - "AttachmentIds": [0], + "SoftCreditIds": [ + 0 + ], + "AttachmentIds": [ + 0 + ], "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T14:15:24.293Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T14:15:24.293Z", - "LastModifiedName": "string", + "LastModifiedName": "string" }, - "Type": "Donation", + "Type": "Donation" }, { "Id": 0, @@ -414,35 +585,77 @@ "Amount": 0, "NonDeductibleAmount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], - "Fund": {"Id": 0, "Name": "string"}, - "QuickbooksAccount": {"Id": 0, "Name": "string"}, - "Campaign": {"Id": 0, "Name": "string"}, - "Appeal": {"Id": 0, "Name": "string"}, - "Tribute": {"Id": 0, "Name": "string"}, + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], + "Fund": { + "Id": 0, + "Name": "string" + }, + "QuickbooksAccount": { + "Id": 0, + "Name": "string" + }, + "Campaign": { + "Id": 0, + "Name": "string" + }, + "Appeal": { + "Id": 0, + "Name": "string" + }, + "Tribute": { + "Id": 0, + "Name": "string" + }, "TributeType": "InHonorOf", - "SoftCreditIds": [0], - "AttachmentIds": [0], + "SoftCreditIds": [ + 0 + ], + "AttachmentIds": [ + 0 + ], "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T14:15:24.293Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T14:15:24.293Z", - "LastModifiedName": "string", + "LastModifiedName": "string" }, "Type": "Pledge", - "PledgePaymentIds": [0], + "PledgePaymentIds": [ + 0 + ], "PledgeInstallments": [ - {"Id": 0, "PledgeId": 0, "Date": "2020-09-08", "Amount": 0} + { + "Id": 0, + "PledgeId": 0, + "Date": "2020-09-08", + "Amount": 0 + } ], "PledgeBalance": 0, "PledgeStatus": "InGoodStanding", "PledgeAmountInArrears": 0, - 
"PledgeNextInstallmentDate": "2020-09-08", + "PledgeNextInstallmentDate": "2020-09-08" }, { "Id": 0, @@ -451,28 +664,63 @@ "Amount": 0, "NonDeductibleAmount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], - "Fund": {"Id": 0, "Name": "string"}, - "QuickbooksAccount": {"Id": 0, "Name": "string"}, - "Campaign": {"Id": 0, "Name": "string"}, - "Appeal": {"Id": 0, "Name": "string"}, - "Tribute": {"Id": 0, "Name": "string"}, + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], + "Fund": { + "Id": 0, + "Name": "string" + }, + "QuickbooksAccount": { + "Id": 0, + "Name": "string" + }, + "Campaign": { + "Id": 0, + "Name": "string" + }, + "Appeal": { + "Id": 0, + "Name": "string" + }, + "Tribute": { + "Id": 0, + "Name": "string" + }, "TributeType": "InHonorOf", - "SoftCreditIds": [0], - "AttachmentIds": [0], + "SoftCreditIds": [ + 0 + ], + "AttachmentIds": [ + 0 + ], "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T14:15:24.293Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T14:15:24.293Z", - "LastModifiedName": "string", + "LastModifiedName": "string" }, "Type": "PledgePayment", - "PledgeId": 0, + "PledgeId": 0 }, { "Id": 0, @@ -481,25 +729,60 @@ "Amount": 0, "NonDeductibleAmount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], - "Fund": {"Id": 0, "Name": "string"}, - "QuickbooksAccount": {"Id": 0, "Name": "string"}, - "Campaign": {"Id": 0, "Name": "string"}, - "Appeal": {"Id": 0, "Name": "string"}, - "Tribute": {"Id": 0, "Name": "string"}, + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], + "Fund": { + "Id": 0, + "Name": "string" + }, + "QuickbooksAccount": { + "Id": 0, + "Name": "string" + }, + "Campaign": { + "Id": 0, + "Name": "string" + }, + "Appeal": { + "Id": 0, + "Name": "string" + }, + "Tribute": { + "Id": 0, + "Name": "string" + }, "TributeType": "InHonorOf", - "SoftCreditIds": [0], - "AttachmentIds": [0], + "SoftCreditIds": [ + 0 + ], + "AttachmentIds": [ + 0 + ], "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T14:15:24.293Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T14:15:24.293Z", - "LastModifiedName": "string", + "LastModifiedName": "string" }, "RecurringDonationEndDate": "2020-09-08", "RecurringDonationFrequency": "Weekly", @@ -507,10 +790,12 @@ "RecurringDonationDay2": 0, "RecurringDonationStartDate": "2020-09-08", "Type": "RecurringDonation", - "RecurringDonationPaymentIds": [0], + "RecurringDonationPaymentIds": [ + 0 + ], "RecurringDonationNextInstallmentDate": "2020-09-08", "RecurringDonationLastPaymentStatus": "AtRisk", - "RecurringDonationStatus": "Active", + "RecurringDonationStatus": "Active" }, { "Id": 0, @@ -519,39 +804,78 @@ "Amount": 0, "NonDeductibleAmount": 0, "Note": "string", - "AcknowledgementStatus": "true", - "AcknowledgementInteractionIds": [0], - "Fund": {"Id": 0, "Name": "string"}, - "QuickbooksAccount": {"Id": 0, 
"Name": "string"}, - "Campaign": {"Id": 0, "Name": "string"}, - "Appeal": {"Id": 0, "Name": "string"}, - "Tribute": {"Id": 0, "Name": "string"}, + "AcknowledgementStatus": 'true', + "AcknowledgementInteractionIds": [ + 0 + ], + "Fund": { + "Id": 0, + "Name": "string" + }, + "QuickbooksAccount": { + "Id": 0, + "Name": "string" + }, + "Campaign": { + "Id": 0, + "Name": "string" + }, + "Appeal": { + "Id": 0, + "Name": "string" + }, + "Tribute": { + "Id": 0, + "Name": "string" + }, "TributeType": "InHonorOf", - "SoftCreditIds": [0], - "AttachmentIds": [0], + "SoftCreditIds": [ + 0 + ], + "AttachmentIds": [ + 0 + ], "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T14:15:24.293Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T14:15:24.293Z", - "LastModifiedName": "string", + "LastModifiedName": "string" }, "Type": "RecurringDonationPayment", - "RecurringDonationId": 0, - }, + "RecurringDonationId": 0 + } + ], + "AttachmentIds": [ + 0 + ], + "IsRefunded": 'true', + "RefundIds": [ + 0 ], - "AttachmentIds": [0], - "IsRefunded": "true", - "RefundIds": [0], "AuditTrail": { "CreatedDate": "2020-09-08T14:15:24.293Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T14:15:24.293Z", - "LastModifiedName": "string", - }, + "LastModifiedName": "string" + } } TEST_GET_TRANSACTIONS = { @@ -581,18 +905,28 @@ "InKindType": "Goods", "InKindMarketValue": 0, "IntegrationUrl": "string", - "Designations": ["null", "null", "null", "null", "null"], - "AttachmentIds": [0], - "IsRefunded": "true", - "RefundIds": [0], + "Designations": [ + 'null', + 'null', + 'null', + 'null', + 'null' + ], + "AttachmentIds": [ + 0 + ], + "IsRefunded": 'true', + "RefundIds": [ + 0 + ], "AuditTrail": { "CreatedDate": "2020-09-08T16:11:30.821Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T16:11:30.821Z", - "LastModifiedName": "string", - }, + "LastModifiedName": "string" + } } - ], + ] } TEST_CREATE_INTERACTION = { @@ -602,16 +936,37 @@ "Channel": "Email", "Purpose": "Acknowledgement", "Subject": "string", - "IsInbound": "true", + "IsInbound": 'true', "CustomValues": [ - {"FieldId": 0, "Value": "string"}, - {"FieldId": 0, "ValueId": 0}, - {"FieldId": 0, "ValueIds": [0]}, + { + "FieldId": 0, + "Value": "string" + }, + { + "FieldId": 0, + "ValueId": 0 + }, + { + "FieldId": 0, + "ValueIds": [ + 0 + ] + } ], "Attachments": [ - {"Guid": "string", "Name": "string", "Extension": "string", "Url": "string"}, - {"Id": 0, "Name": "string", "Extension": "string", "Url": "string"}, - ], + { + "Guid": "string", + "Name": "string", + "Extension": "string", + "Url": "string" + }, + { + "Id": 0, + "Name": "string", + "Extension": "string", + "Url": "string" + } + ] } TEST_GET_INTERACTION = { @@ -621,26 +976,46 @@ "Channel": "Email", "Purpose": "Acknowledgement", "Subject": "string", - "IsInbound": "true", + "IsInbound": 'true', "AccountId": 0, "TweetId": "string", - "IsBcc": "true", + "IsBcc": 'true', "EmailAddress": "user@example.com", - "AttachmentIds": [0], - "LetterAttachmentIds": [0], - "SurveyLapsedResponses": ["string"], + "AttachmentIds": [ + 0 + ], + "LetterAttachmentIds": [ + 0 + ], + "SurveyLapsedResponses": [ + "string" + ], "SurveyEmailInteractionId": 0, "SurveyResponseInteractionId": 0, "CustomValues": [ - {"FieldId": 0, "Value": 
{"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T15:27:32.767Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T15:27:32.767Z", - "LastModifiedName": "string", - }, + "LastModifiedName": "string" + } } TEST_GET_INTERACTIONS = { @@ -656,26 +1031,46 @@ "Channel": "Email", "Purpose": "Acknowledgement", "Subject": "string", - "IsInbound": "true", + "IsInbound": 'true', "AccountId": 0, "TweetId": "string", - "IsBcc": "true", + "IsBcc": 'true', "EmailAddress": "user@example.com", - "AttachmentIds": [0], - "LetterAttachmentIds": [0], - "SurveyLapsedResponses": ["string"], + "AttachmentIds": [ + 0 + ], + "LetterAttachmentIds": [ + 0 + ], + "SurveyLapsedResponses": [ + "string" + ], "SurveyEmailInteractionId": 0, "SurveyResponseInteractionId": 0, "CustomValues": [ - {"FieldId": 0, "Value": {"Id": 0, "Value": "string"}}, - {"FieldId": 0, "Values": [{"Id": 0, "Value": "string"}]}, + { + "FieldId": 0, + "Value": { + "Id": 0, + "Value": "string" + } + }, + { + "FieldId": 0, + "Values": [ + { + "Id": 0, + "Value": "string" + } + ] + } ], "AuditTrail": { "CreatedDate": "2020-09-08T16:10:36.389Z", "CreatedName": "string", "LastModifiedDate": "2020-09-08T16:10:36.389Z", - "LastModifiedName": "string", - }, + "LastModifiedName": "string" + } } - ], + ] } diff --git a/test/test_bluelink/test_bluelink.py b/test/test_bluelink/test_bluelink.py index 2b80cf005a..761e914221 100644 --- a/test/test_bluelink/test_bluelink.py +++ b/test/test_bluelink/test_bluelink.py @@ -1,10 +1,11 @@ import unittest import requests_mock -from parsons import Table, Bluelink -from parsons.bluelink import BluelinkPerson, BluelinkIdentifier, BluelinkEmail +from parsons import Table +from parsons.bluelink import Bluelink, BluelinkPerson, BluelinkIdentifier, BluelinkEmail class TestBluelink(unittest.TestCase): + @requests_mock.Mocker() def setUp(self, m): self.bluelink = Bluelink("fake_user", "fake_password") @@ -28,20 +29,10 @@ def row_to_person(row): @staticmethod def get_table(): - return Table( - [ - { - "given_name": "Bart", - "family_name": "Simpson", - "email": "bart@springfield.net", - }, - { - "given_name": "Homer", - "family_name": "Simpson", - "email": "homer@springfield.net", - }, - ] - ) + return Table([ + {"given_name": "Bart", "family_name": "Simpson", "email": "bart@springfield.net"}, + {"given_name": "Homer", "family_name": "Simpson", "email": "homer@springfield.net"}, + ]) @requests_mock.Mocker() def test_bulk_upsert_person(self, m): @@ -73,12 +64,10 @@ def test_upsert_person(self, m): # create a BluelinkPerson object # The BluelinkIdentifier is pretending that the user can be # identified in SALESFORCE with FAKE_ID as her id - person = BluelinkPerson( - identifiers=[BluelinkIdentifier(source="SALESFORCE", identifier="FAKE_ID")], - given_name="Jane", - family_name="Doe", - emails=[BluelinkEmail(address="jdoe@example.com", primary=True)], - ) + person = BluelinkPerson(identifiers=[BluelinkIdentifier(source="SALESFORCE", + identifier="FAKE_ID")], + given_name="Jane", family_name="Doe", + emails=[BluelinkEmail(address="jdoe@example.com", primary=True)]) # String to identify that the data came from your system. For example, your company name. 
source = "BLUELINK-PARSONS-TEST" @@ -98,24 +87,14 @@ def test_table_to_people(self): # expected: person1 = BluelinkPerson( - identifiers=[ - BluelinkIdentifier( - source="FAKESOURCE", identifier="bart@springfield.net" - ) - ], + identifiers=[BluelinkIdentifier(source="FAKESOURCE", + identifier="bart@springfield.net")], emails=[BluelinkEmail(address="bart@springfield.net", primary=True)], - family_name="Simpson", - given_name="Bart", - ) + family_name="Simpson", given_name="Bart") person2 = BluelinkPerson( - identifiers=[ - BluelinkIdentifier( - source="FAKESOURCE", identifier="homer@springfield.net" - ) - ], + identifiers=[BluelinkIdentifier(source="FAKESOURCE", + identifier="homer@springfield.net")], emails=[BluelinkEmail(address="homer@springfield.net", primary=True)], - family_name="Simpson", - given_name="Homer", - ) + family_name="Simpson", given_name="Homer") expected_people = [person1, person2] self.assertEqual(actual_people, expected_people) diff --git a/test/test_box/test_box_storage.py b/test/test_box/test_box_storage.py index 25113dfc2e..8d1acd1c82 100644 --- a/test/test_box/test_box_storage.py +++ b/test/test_box/test_box_storage.py @@ -7,7 +7,8 @@ from boxsdk.exception import BoxAPIException, BoxOAuthException -from parsons import Box, Table +from parsons.box import Box +from parsons.etl import Table """Prior to running, you should ensure that the relevant environment variables have been set, e.g. via @@ -17,69 +18,63 @@ export BOX_CLIENT_SECRET=bk264KHMDLVy89TeuUpSRa4CN5o35u9h export BOX_ACCESS_TOKEN=boK97B39m3ozIGyTcazbWRbi5F2SSZ5J """ -TEST_CLIENT_ID = os.getenv("BOX_CLIENT_ID") -TEST_BOX_CLIENT_SECRET = os.getenv("BOX_CLIENT_SECRET") -TEST_ACCESS_TOKEN = os.getenv("BOX_ACCESS_TOKEN") +TEST_CLIENT_ID = os.getenv('BOX_CLIENT_ID') +TEST_BOX_CLIENT_SECRET = os.getenv('BOX_CLIENT_SECRET') +TEST_ACCESS_TOKEN = os.getenv('BOX_ACCESS_TOKEN') def generate_random_string(length): """Utility to generate random alpha string for file/folder names""" - return "".join(random.choice(string.ascii_letters) for i in range(length)) + return ''.join(random.choice(string.ascii_letters) for i in range(length)) -@unittest.skipIf(not os.getenv("LIVE_TEST"), "Skipping because not running live test") +@unittest.skipIf(not os.getenv('LIVE_TEST'), 'Skipping because not running live test') class TestBoxStorage(unittest.TestCase): + def setUp(self) -> None: - warnings.filterwarnings( - action="ignore", message="unclosed", category=ResourceWarning - ) + warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) # Create a client that we'll use to manipulate things behind the scenes self.client = Box() # Create test folder that we'll use for all our manipulations self.temp_folder_name = generate_random_string(24) - logging.info(f"Creating temp folder {self.temp_folder_name}") + logging.info(f'Creating temp folder {self.temp_folder_name}') self.temp_folder_id = self.client.create_folder(self.temp_folder_name) def tearDown(self) -> None: - logging.info(f"Deleting temp folder {self.temp_folder_name}") + logging.info(f'Deleting temp folder {self.temp_folder_name}') self.client.delete_folder_by_id(self.temp_folder_id) def test_list_files_by_id(self) -> None: # Count on environment variables being set box = Box() - subfolder = box.create_folder_by_id( - folder_name="id_subfolder", parent_folder_id=self.temp_folder_id - ) + subfolder = box.create_folder_by_id(folder_name='id_subfolder', + parent_folder_id=self.temp_folder_id) # Create a couple of files in the temp folder - table = 
Table( - [ - ["phone_number", "last_name", "first_name"], - ["4435705355", "Warren", "Elizabeth"], - ["5126993336", "Obama", "Barack"], - ] - ) - - box.upload_table_to_folder_id(table, "temp1", folder_id=subfolder) - box.upload_table_to_folder_id(table, "temp2", folder_id=subfolder) - box.create_folder_by_id(folder_name="temp_folder1", parent_folder_id=subfolder) - box.create_folder_by_id(folder_name="temp_folder2", parent_folder_id=subfolder) + table = Table([['phone_number', 'last_name', 'first_name'], + ['4435705355', 'Warren', 'Elizabeth'], + ['5126993336', 'Obama', 'Barack']]) + + box.upload_table_to_folder_id(table, 'temp1', folder_id=subfolder) + box.upload_table_to_folder_id(table, 'temp2', folder_id=subfolder) + box.create_folder_by_id(folder_name='temp_folder1', parent_folder_id=subfolder) + box.create_folder_by_id(folder_name='temp_folder2', parent_folder_id=subfolder) file_list = box.list_files_by_id(folder_id=subfolder) - self.assertEqual(["temp1", "temp2"], file_list["name"]) + self.assertEqual(['temp1', 'temp2'], file_list['name']) # Check that if we delete a file, it's no longer there for box_file in file_list: - if box_file["name"] == "temp1": - box.delete_file_by_id(box_file["id"]) + if box_file['name'] == 'temp1': + box.delete_file_by_id(box_file['id']) break file_list = box.list_files_by_id(folder_id=subfolder) - self.assertEqual(["temp2"], file_list["name"]) + self.assertEqual(['temp2'], file_list['name']) - folder_list = box.list_folders_by_id(folder_id=subfolder)["name"] - self.assertEqual(["temp_folder1", "temp_folder2"], folder_list) + folder_list = box.list_folders_by_id(folder_id=subfolder)['name'] + self.assertEqual(['temp_folder1', 'temp_folder2'], folder_list) def test_list_files_by_path(self) -> None: # Count on environment variables being set @@ -88,66 +83,55 @@ def test_list_files_by_path(self) -> None: # Make sure our test folder is in the right place found_default = False for item in box.list(): - if item["name"] == self.temp_folder_name: + if item['name'] == self.temp_folder_name: found_default = True break - self.assertTrue( - found_default, - f"Failed to find test folder f{self.temp_folder_name} " - f"in default Box folder", - ) - - subfolder_name = "path_subfolder" - subfolder_path = f"{self.temp_folder_name}/{subfolder_name}" + self.assertTrue(found_default, + f'Failed to find test folder f{self.temp_folder_name} ' + f'in default Box folder') + + subfolder_name = 'path_subfolder' + subfolder_path = f'{self.temp_folder_name}/{subfolder_name}' box.create_folder(path=subfolder_path) # Create a couple of files in the temp folder - table = Table( - [ - ["phone_number", "last_name", "first_name"], - ["4435705355", "Warren", "Elizabeth"], - ["5126993336", "Obama", "Barack"], - ] - ) - - box.upload_table(table, f"{subfolder_path}/temp1") - box.upload_table(table, f"{subfolder_path}/temp2") - box.create_folder(f"{subfolder_path}/temp_folder1") - box.create_folder(f"{subfolder_path}/temp_folder2") - - file_list = box.list(path=subfolder_path, item_type="file") - self.assertEqual(["temp1", "temp2"], file_list["name"]) + table = Table([['phone_number', 'last_name', 'first_name'], + ['4435705355', 'Warren', 'Elizabeth'], + ['5126993336', 'Obama', 'Barack']]) + + box.upload_table(table, f'{subfolder_path}/temp1') + box.upload_table(table, f'{subfolder_path}/temp2') + box.create_folder(f'{subfolder_path}/temp_folder1') + box.create_folder(f'{subfolder_path}/temp_folder2') + + file_list = box.list(path=subfolder_path, item_type='file') + self.assertEqual(['temp1', 
'temp2'], file_list['name']) # Check that if we delete a file, it's no longer there for box_file in file_list: - if box_file["name"] == "temp1": - box.delete_file(path=f"{subfolder_path}/temp1") + if box_file['name'] == 'temp1': + box.delete_file(path=f'{subfolder_path}/temp1') break - file_list = box.list(path=subfolder_path, item_type="file") - self.assertEqual(["temp2"], file_list["name"]) + file_list = box.list(path=subfolder_path, item_type='file') + self.assertEqual(['temp2'], file_list['name']) - folder_list = box.list(path=subfolder_path, item_type="folder") - self.assertEqual(["temp_folder1", "temp_folder2"], folder_list["name"]) + folder_list = box.list(path=subfolder_path, item_type='folder') + self.assertEqual(['temp_folder1', 'temp_folder2'], folder_list['name']) # Make sure we can delete by path - box.delete_folder(f"{subfolder_path}/temp_folder1") - folder_list = box.list(path=subfolder_path, item_type="folder") - self.assertEqual(["temp_folder2"], folder_list["name"]) + box.delete_folder(f'{subfolder_path}/temp_folder1') + folder_list = box.list(path=subfolder_path, item_type='folder') + self.assertEqual(['temp_folder2'], folder_list['name']) def test_upload_file(self) -> None: # Count on environment variables being set box = Box() - table = Table( - [ - ["phone_number", "last_name", "first_name"], - ["4435705355", "Warren", "Elizabeth"], - ["5126993336", "Obama", "Barack"], - ] - ) - box_file = box.upload_table_to_folder_id( - table, "phone_numbers", folder_id=self.temp_folder_id - ) + table = Table([['phone_number', 'last_name', 'first_name'], + ['4435705355', 'Warren', 'Elizabeth'], + ['5126993336', 'Obama', 'Barack']]) + box_file = box.upload_table_to_folder_id(table, 'phone_numbers', + folder_id=self.temp_folder_id) new_table = box.get_table_by_file_id(box_file.id) @@ -155,41 +139,35 @@ def test_upload_file(self) -> None: self.assertEqual(str(table), str(new_table)) # Check that things also work in JSON - box_file = box.upload_table_to_folder_id( - table, "phone_numbers_json", folder_id=self.temp_folder_id, format="json" - ) + box_file = box.upload_table_to_folder_id(table, 'phone_numbers_json', + folder_id=self.temp_folder_id, + format='json') - new_table = box.get_table_by_file_id(box_file.id, format="json") + new_table = box.get_table_by_file_id(box_file.id, format='json') # Check that what we saved is equal to what we got back self.assertEqual(str(table), str(new_table)) # Now check the same thing with paths instead of file_id - path_filename = "path_phone_numbers" - box_file = box.upload_table(table, f"{self.temp_folder_name}/{path_filename}") - new_table = box.get_table(path=f"{self.temp_folder_name}/{path_filename}") + path_filename = 'path_phone_numbers' + box_file = box.upload_table(table, f'{self.temp_folder_name}/{path_filename}') + new_table = box.get_table(path=f'{self.temp_folder_name}/{path_filename}') # Check that we throw an exception with bad formats with self.assertRaises(ValueError): - box.upload_table_to_folder_id( - table, "phone_numbers", format="illegal_format" - ) + box.upload_table_to_folder_id(table, 'phone_numbers', format='illegal_format') with self.assertRaises(ValueError): - box.get_table_by_file_id(box_file.id, format="illegal_format") + box.get_table_by_file_id(box_file.id, format='illegal_format') def test_download_file(self) -> None: box = Box() - table = Table( - [ - ["phone_number", "last_name", "first_name"], - ["4435705355", "Warren", "Elizabeth"], - ["5126993336", "Obama", "Barack"], - ] - ) + table = Table([['phone_number', 
'last_name', 'first_name'], + ['4435705355', 'Warren', 'Elizabeth'], + ['5126993336', 'Obama', 'Barack']]) uploaded_file = table.to_csv() - path_filename = f"{self.temp_folder_name}/my_path" + path_filename = f'{self.temp_folder_name}/my_path' box.upload_table(table, path_filename) downloaded_file = box.download_file(path_filename) @@ -202,31 +180,24 @@ def test_get_item_id(self) -> None: box = Box() # Create a subfolder in which we'll do this test - sub_sub_folder_name = "item_subfolder" - sub_sub_folder_id = box.create_folder_by_id( - folder_name=sub_sub_folder_name, parent_folder_id=self.temp_folder_id - ) - - table = Table( - [ - ["phone_number", "last_name", "first_name"], - ["4435705355", "Warren", "Elizabeth"], - ["5126993336", "Obama", "Barack"], - ] - ) - box_file = box.upload_table_to_folder_id( - table, "file_in_subfolder", folder_id=self.temp_folder_id - ) - - box_file = box.upload_table_to_folder_id( - table, "phone_numbers", folder_id=sub_sub_folder_id - ) + sub_sub_folder_name = 'item_subfolder' + sub_sub_folder_id = box.create_folder_by_id(folder_name=sub_sub_folder_name, + parent_folder_id=self.temp_folder_id) + + table = Table([['phone_number', 'last_name', 'first_name'], + ['4435705355', 'Warren', 'Elizabeth'], + ['5126993336', 'Obama', 'Barack']]) + box_file = box.upload_table_to_folder_id(table, 'file_in_subfolder', + folder_id=self.temp_folder_id) + + box_file = box.upload_table_to_folder_id(table, 'phone_numbers', + folder_id=sub_sub_folder_id) # Now try getting various ids - file_path = f"{self.temp_folder_name}/item_subfolder/phone_numbers" + file_path = f'{self.temp_folder_name}/item_subfolder/phone_numbers' self.assertEqual(box_file.id, box.get_item_id(path=file_path)) - file_path = f"{self.temp_folder_name}/item_subfolder" + file_path = f'{self.temp_folder_name}/item_subfolder' self.assertEqual(sub_sub_folder_id, box.get_item_id(path=file_path)) file_path = self.temp_folder_name @@ -234,65 +205,57 @@ def test_get_item_id(self) -> None: # Trailing "/" with self.assertRaises(ValueError): - file_path = f"{self.temp_folder_name}/item_subfolder/phone_numbers/" + file_path = f'{self.temp_folder_name}/item_subfolder/phone_numbers/' box.get_item_id(path=file_path) # Nonexistent file with self.assertRaises(ValueError): - file_path = ( - f"{self.temp_folder_name}/item_subfolder/nonexistent/phone_numbers" - ) + file_path = f'{self.temp_folder_name}/item_subfolder/nonexistent/phone_numbers' box.get_item_id(path=file_path) # File (rather than folder) in middle of path with self.assertRaises(ValueError): - file_path = f"{self.temp_folder_name}/file_in_subfolder/phone_numbers" + file_path = f'{self.temp_folder_name}/file_in_subfolder/phone_numbers' box.get_item_id(path=file_path) def test_errors(self) -> None: # Count on environment variables being set box = Box() - nonexistent_id = "9999999" - table = Table( - [ - ["phone_number", "last_name", "first_name"], - ["4435705355", "Warren", "Elizabeth"], - ["5126993336", "Obama", "Barack"], - ] - ) + nonexistent_id = '9999999' + table = Table([['phone_number', 'last_name', 'first_name'], + ['4435705355', 'Warren', 'Elizabeth'], + ['5126993336', 'Obama', 'Barack']]) # Upload a bad format with self.assertRaises(ValueError): - box.upload_table_to_folder_id(table, "temp1", format="bad_format") + box.upload_table_to_folder_id(table, 'temp1', format='bad_format') # Download a bad format with self.assertRaises(ValueError): - box.get_table_by_file_id(file_id=nonexistent_id, format="bad_format") + 
box.get_table_by_file_id(file_id=nonexistent_id, format='bad_format') # Upload to non-existent folder with self.assertLogs(level=logging.WARNING): with self.assertRaises(BoxAPIException): - box.upload_table_to_folder_id(table, "temp1", folder_id=nonexistent_id) + box.upload_table_to_folder_id(table, 'temp1', folder_id=nonexistent_id) # Download a non-existent file with self.assertLogs(level=logging.WARNING): with self.assertRaises(BoxAPIException): - box.get_table_by_file_id(nonexistent_id, format="json") + box.get_table_by_file_id(nonexistent_id, format='json') # Create folder in non-existent parent with self.assertRaises(ValueError): - box.create_folder("nonexistent_path/path") + box.create_folder('nonexistent_path/path') # Create folder in non-existent parent with self.assertLogs(level=logging.WARNING): with self.assertRaises(BoxAPIException): - box.create_folder_by_id( - folder_name="subfolder", parent_folder_id=nonexistent_id - ) + box.create_folder_by_id(folder_name='subfolder', parent_folder_id=nonexistent_id) # Try using bad credentials - box = Box(access_token="5345345345") + box = Box(access_token='5345345345') with self.assertLogs(level=logging.WARNING): with self.assertRaises(BoxOAuthException): box.list_files_by_id() diff --git a/test/test_braintree/test_braintree.py b/test/test_braintree/test_braintree.py index 8cdddeca9a..20f11ade82 100644 --- a/test/test_braintree/test_braintree.py +++ b/test/test_braintree/test_braintree.py @@ -5,149 +5,81 @@ import requests_mock from test.utils import assert_matching_tables -from parsons import Table, Braintree +from parsons.etl.table import Table +from parsons.braintree.braintree import Braintree _dir = os.path.dirname(__file__) class TestBraintree(unittest.TestCase): + def setUp(self): - self.braintree = Braintree( - merchant_id="abcd1234abcd1234", - public_key="abcd1234abcd1234", - private_key="abcd1234abcd1234abcd1234abcd1234", - ) + self.braintree = Braintree(merchant_id='abcd1234abcd1234', public_key='abcd1234abcd1234', + private_key='abcd1234abcd1234abcd1234abcd1234') @requests_mock.Mocker() def test_dispute_search(self, m): - m.post( - "https://api.braintreegateway.com:443" - "/merchants/abcd1234abcd1234/disputes/advanced_search?page=1", - text=open(f"{_dir}/test_data/dispute_example.xml").read(), - ) - table = self.braintree.get_disputes( - start_date="2020-01-01", end_date="2020-01-02" - ) + m.post('https://api.braintreegateway.com:443' + '/merchants/abcd1234abcd1234/disputes/advanced_search?page=1', + text=open(f'{_dir}/test_data/dispute_example.xml').read()) + table = self.braintree.get_disputes(start_date="2020-01-01", end_date="2020-01-02") self.assertEqual(len(table.table), 3) - self.assertEqual(table[0]["id"], "abcd1234abcd1234") - self.assertEqual(table[1]["id"], "ghjk6789ghjk6789") - self.assertEqual(table[0]["transaction_id"], "d9f876fg") - self.assertEqual(table[1]["transaction_id"], "98df87fg") - self.assertEqual(table[0]["reason"], "transaction_amount_differs") - self.assertEqual(table[1]["reason"], "fraud") + self.assertEqual(table[0]['id'], 'abcd1234abcd1234') + self.assertEqual(table[1]['id'], 'ghjk6789ghjk6789') + self.assertEqual(table[0]['transaction_id'], 'd9f876fg') + self.assertEqual(table[1]['transaction_id'], '98df87fg') + self.assertEqual(table[0]['reason'], 'transaction_amount_differs') + self.assertEqual(table[1]['reason'], 'fraud') @requests_mock.Mocker() def test_transaction_search(self, m): - m.post( - "https://api.braintreegateway.com:443" - 
"/merchants/abcd1234abcd1234/transactions/advanced_search_ids", - text=""" + m.post('https://api.braintreegateway.com:443' + '/merchants/abcd1234abcd1234/transactions/advanced_search_ids', + text=""" 50 1234abcd 0987asdf - """, - ) - table = self.braintree.get_transactions( - disbursement_start_date="2020-01-01", - disbursement_end_date="2020-01-02", - just_ids=True, - ) - assert_matching_tables(table, Table([["id"], ["1234abcd"], ["0987asdf"]])) - m.post( - "https://api.braintreegateway.com:443" - "/merchants/abcd1234abcd1234/transactions/advanced_search", - text=open(f"{_dir}/test_data/transaction_example.xml").read(), - ) - full_table = self.braintree.get_transactions( - disbursement_start_date="2020-01-01", - disbursement_end_date="2020-01-02", - table_of_ids=table, - ) - self.assertEqual(len(table.table), 3) - self.assertEqual(len(full_table.table), 3) - self.assertEqual(table[0]["id"], "1234abcd") - self.assertEqual(table[1]["id"], "0987asdf") - self.assertEqual(len(table[0].keys()), 1) - self.assertEqual(len(full_table[0].keys()), 67) - - self.assertEqual( - full_table[0]["disbursement_date"], datetime.date(2019, 12, 30) - ) - self.assertEqual(full_table[0]["credit_card_bin"], "789234") - self.assertEqual(full_table[0]["disbursement_success"], True) - self.assertEqual(full_table[0]["amount"], decimal.Decimal("150.00")) - - @requests_mock.Mocker() - def test_subscription_search(self, m): - m.post( - "https://api.braintreegateway.com:443" - "/merchants/abcd1234abcd1234/subscriptions/advanced_search_ids", - text=""" - - 50 - aabbcc 1a2b3c - - """, - ) - table = self.braintree.get_subscriptions( - start_date="2022-08-22", end_date="2022-08-23", just_ids=True - ) - assert_matching_tables(table, Table([["id"], ["aabbcc"], ["1a2b3c"]])) - m.post( - "https://api.braintreegateway.com:443" - "/merchants/abcd1234abcd1234/subscriptions/advanced_search", - text=open(f"{_dir}/test_data/subscription_example.xml").read(), - ) - full_table = self.braintree.get_subscriptions( - start_date="2020-01-01", - end_date="2020-01-02", - table_of_ids=table, - include_transactions=True, - ) + """) + table = self.braintree.get_transactions(disbursement_start_date="2020-01-01", + disbursement_end_date="2020-01-02", + just_ids=True) + assert_matching_tables(table, Table([['id'], ['1234abcd'], ['0987asdf']])) + m.post('https://api.braintreegateway.com:443' + '/merchants/abcd1234abcd1234/transactions/advanced_search', + text=open(f'{_dir}/test_data/transaction_example.xml').read()) + full_table = self.braintree.get_transactions(disbursement_start_date="2020-01-01", + disbursement_end_date="2020-01-02", + table_of_ids=table) self.assertEqual(len(table.table), 3) self.assertEqual(len(full_table.table), 3) - self.assertEqual(table[0]["id"], "aabbcc") - self.assertEqual(table[1]["id"], "1a2b3c") + self.assertEqual(table[0]['id'], '1234abcd') + self.assertEqual(table[1]['id'], '0987asdf') self.assertEqual(len(table[0].keys()), 1) - self.assertEqual(len(full_table[0].keys()), 33) + self.assertEqual(len(full_table[0].keys()), 64) - self.assertEqual( - full_table[0]["first_billing_date"], datetime.date(2022, 8, 22) - ) - self.assertEqual( - full_table[0]["transactions"][0].credit_card_details.bin, "999" - ) - self.assertEqual(full_table[0]["never_expires"], True) - self.assertEqual(full_table[0]["price"], decimal.Decimal("10.00")) + self.assertEqual(full_table[0]['disbursement_date'], datetime.date(2019, 12, 30)) + self.assertEqual(full_table[0]['credit_card_bin'], '789234') + 
self.assertEqual(full_table[0]['disbursement_success'], True) + self.assertEqual(full_table[0]['amount'], decimal.Decimal('150.00')) def test_query_generation(self): query = self.braintree._get_query_objects( - "transaction", - **{"disbursement_date": {"between": ["2020-01-01", "2020-01-01"]}}, - ) - self.assertEqual(query[0].name, "disbursement_date") - self.assertEqual( - query[0].to_param(), {"min": "2020-01-01", "max": "2020-01-01"} - ) + 'transaction', **{'disbursement_date': {'between': ["2020-01-01", "2020-01-01"]}}) + self.assertEqual(query[0].name, 'disbursement_date') + self.assertEqual(query[0].to_param(), {'min': '2020-01-01', 'max': '2020-01-01'}) query = self.braintree._get_query_objects( - "transaction", **{"merchant_account_id": {"in_list": ["abc123"]}} - ) + 'transaction', **{'merchant_account_id': {'in_list': ["abc123"]}}) - self.assertEqual(query[0].name, "merchant_account_id") - self.assertEqual(query[0].to_param(), ["abc123"]) + self.assertEqual(query[0].name, 'merchant_account_id') + self.assertEqual(query[0].to_param(), ['abc123']) query = self.braintree._get_query_objects( - "dispute", - **{ - "merchant_account_id": {"in_list": ["abc123"]}, - "effective_date": {"between": ["2020-01-01", "2020-01-01"]}, - }, - ) - self.assertEqual(query[0].name, "merchant_account_id") - self.assertEqual(query[1].name, "effective_date") - self.assertEqual( - query[1].to_param(), {"min": "2020-01-01", "max": "2020-01-01"} - ) + 'dispute', **{ + 'merchant_account_id': {'in_list': ["abc123"]}, + 'effective_date': {'between': ["2020-01-01", "2020-01-01"]}}) + self.assertEqual(query[0].name, 'merchant_account_id') + self.assertEqual(query[1].name, 'effective_date') + self.assertEqual(query[1].to_param(), {'min': '2020-01-01', 'max': '2020-01-01'})
diff --git a/test/test_braintree/test_data/subscription_example.xml b/test/test_braintree/test_data/subscription_example.xml deleted file mode 100644 index b8ae052acf..0000000000 --- a/test/test_braintree/test_data/subscription_example.xml +++ /dev/null @@ -1,542 +0,0 @@ [542 deleted lines: XML test fixture containing two sample subscription records (billing, transaction, customer, and disbursement details); the markup was lost in extraction, leaving only bare field values, so the fixture body is not reproduced]
diff --git a/test/test_capitol_canary.py b/test/test_capitol_canary.py deleted file mode 100644 index c0916b82f3..0000000000 --- a/test/test_capitol_canary.py +++ /dev/null @@ -1,336 +0,0 @@ -import unittest -import requests_mock -from test.utils import validate_list -from parsons import CapitolCanary -import os -import copy - - -adv_json = { - "data": [ - { - "id": 7125439, - "prefix": "Mr.", - "firstname": "Bob", - "middlename": "Smith", - "lastname": "Smit", - "suffix": None, - "notes": None, - "stage": None, - "connections": 3, - "tags": [ - "register-to-vote-38511", - "registered-to-vote-for-2018-38511", - ], - "created_at": "2017-05-23 23:36:04.000000", - "updated_at": "2018-12-17 21:55:24.000000", - "address": { - "street1": "25255 Maine Ave", - "street2": "", - "city": "Los Angeles", - "state": "CA", - "zip5": 96055, - "zip4": 9534, - "county": "Tehama", - "latitude": "50.0632635", - "longitude": "-122.09654", - }, - "districts": { - "congressional": "1", - "stateSenate": "4", - "stateHouse": "3", - "cityCouncil": None, - }, - "ids": [], - "memberships": [ - { - "id": 15151443, - "campaignid": 25373, - "name": "20171121 Businesses for Responsible Tax Reform - Contact Congress", - "source": None, - "created_at": "2017-11-21 23:28:30.000000", - }, - { - "id": 20025582, - "campaignid": 32641, - "name": "20180524 March for 
America", - "source": None, - "created_at": "2018-05-24 21:09:49.000000", - }, - ], - "fields": [], - "phones": [ - {"id": 10537860, "address": "+19995206447", "subscribed": "false"} - ], - "emails": [ - {"id": 10537871, "address": "N@k.com", "subscribed": "false"}, - {"id": 10950446, "address": "email@me.com", "subscribed": "false"}, - ], - } - ], - "pagination": { - "count": 1, - "per_page": 100, - "current_page": 1, - "next_url": "https://api.phone2action.com/2.0/advocates?page=2", - }, -} - -camp_json = [ - { - "id": 25373, - "name": "20171121 Businesses for Responsible Tax Reform - Contact Congress", - "display_name": "Businesses for Responsible Tax Reform", - "subtitle": "Tell Congress: Stand up for responsible tax reform!", - "public": 1, - "topic": None, - "type": "CAMPAIGN", - "link": "http://p2a.co/KHcUyTK", - "restrict_allow": None, - "content": { - "summary": "", - "introduction": "Welcome", - "call_to_action": "Contact your officials in one click!", - "thank_you": "
Thanks for taking action. Please encourage others to act by " - "sharing on social media.
", - "background_image": None, - }, - "updated_at": { - "date": "2017-11-21 23:27:11.000000", - "timezone_type": 3, - "timezone": "UTC", - }, - } -] - - -def parse_request_body(m): - kvs = m.split("&") - return {kv.split("=")[0]: kv.split("=")[1] for kv in kvs} - - -class TestP2A(unittest.TestCase): - def setUp(self): - - self.cc = CapitolCanary(app_id="an_id", app_key="app_key") - - def tearDown(self): - - pass - - def test_init_args(self): - # Test initializing class with args - # Done in the setUp - - pass - - def test_old_init_envs(self): - # Test initializing class with old envs - - os.environ["PHONE2ACTION_APP_ID"] = "id" - os.environ["PHONE2ACTION_APP_KEY"] = "key" - - cc_envs = CapitolCanary() - self.assertEqual(cc_envs.app_id, "id") - self.assertEqual(cc_envs.app_key, "key") - - def test_init_envs(self): - # Test initializing class with envs - - os.environ["CAPITOLCANARY_APP_ID"] = "id" - os.environ["CAPITOLCANARY_APP_KEY"] = "key" - - cc_envs = CapitolCanary() - self.assertEqual(cc_envs.app_id, "id") - self.assertEqual(cc_envs.app_key, "key") - - @requests_mock.Mocker() - def test_get_advocates(self, m): - - m.get(self.cc.client.uri + "advocates", json=adv_json) - - adv_exp = [ - "id", - "prefix", - "firstname", - "middlename", - "lastname", - "suffix", - "notes", - "stage", - "connections", - "created_at", - "updated_at", - "address_city", - "address_county", - "address_latitude", - "address_longitude", - "address_state", - "address_street1", - "address_street2", - "address_zip4", - "address_zip5", - "districts_cityCouncil", - "districts_congressional", - "districts_stateHouse", - "districts_stateSenate", - ] - - self.assertTrue(validate_list(adv_exp, self.cc.get_advocates()["advocates"])) - ids_exp = ["advocate_id", "ids"] - - self.assertTrue(validate_list(ids_exp, self.cc.get_advocates()["ids"])) - - phone_exp = ["advocate_id", "phones_address", "phones_id", "phones_subscribed"] - self.assertTrue(validate_list(phone_exp, self.cc.get_advocates()["phones"])) - - tags_exp = ["advocate_id", "tags"] - self.assertTrue(validate_list(tags_exp, self.cc.get_advocates()["tags"])) - - email_exp = ["advocate_id", "emails_address", "emails_id", "emails_subscribed"] - self.assertTrue(validate_list(email_exp, self.cc.get_advocates()["emails"])) - - member_exp = [ - "advocate_id", - "memberships_campaignid", - "memberships_created_at", - "memberships_id", - "memberships_name", - "memberships_source", - ] - self.assertTrue( - validate_list(member_exp, self.cc.get_advocates()["memberships"]) - ) - - fields_exp = ["advocate_id", "fields"] - self.assertTrue(validate_list(fields_exp, self.cc.get_advocates()["fields"])) - - @requests_mock.Mocker() - def test_get_advocates__by_page(self, m): - - response = copy.deepcopy(adv_json) - # Make it look like there's more data - response["pagination"]["count"] = 100 - - m.get(self.cc.client.uri + "advocates?page=1", json=adv_json) - m.get( - self.cc.client.uri + "advocates?page=2", - exc=Exception("Should only call once"), - ) - - results = self.cc.get_advocates(page=1) - self.assertTrue(results["advocates"].num_rows, 1) - - @requests_mock.Mocker() - def test_get_advocates__empty(self, m): - - response = copy.deepcopy(adv_json) - response["data"] = [] - # Make it look like there's more data - response["pagination"]["count"] = 0 - - m.get(self.cc.client.uri + "advocates", json=adv_json) - - results = self.cc.get_advocates() - self.assertTrue(results["advocates"].num_rows, 0) - - @requests_mock.Mocker() - def test_get_campaigns(self, m): - - camp_exp = [ - 
"id", - "name", - "display_name", - "subtitle", - "public", - "topic", - "type", - "link", - "restrict_allow", - "updated_at_date", - "updated_at_timezone", - "updated_at_timezone_type", - "content_background_image", - "content_call_to_action", - "content_introduction", - "content_summary", - "content_thank_you", - ] - - m.get(self.cc.client.uri + "campaigns", json=camp_json) - - self.assertTrue(validate_list(camp_exp, self.cc.get_campaigns())) - - @requests_mock.Mocker() - def test_create_advocate(self, m): - - m.post(self.cc.client.uri + "advocates", json={"advocateid": 1}) - - # Test arg validation - create requires a phone or an email - self.assertRaises( - ValueError, - lambda: self.cc.create_advocate( - campaigns=[1], firstname="Foo", lastname="bar" - ), - ) - # Test arg validation - sms opt in requires a phone - self.assertRaises( - ValueError, - lambda: self.cc.create_advocate( - campaigns=[1], email="foo@bar.com", sms_optin=True - ), - ) - - # Test arg validation - email opt in requires a email - self.assertRaises( - ValueError, - lambda: self.cc.create_advocate( - campaigns=[1], phone="1234567890", email_optin=True - ), - ) - - # Test a successful call - advocateid = self.cc.create_advocate( - campaigns=[1], email="foo@bar.com", email_optin=True, firstname="Test" - ) - self.assertTrue(m.called) - self.assertEqual(advocateid, 1) - - # Check that the properties were mapped - data = parse_request_body(m.last_request.text) - self.assertEqual(data["firstname"], "Test") - self.assertNotIn("lastname", data) - self.assertEqual(data["emailOptin"], "1") - self.assertEqual(data["email"], "foo%40bar.com") - - @requests_mock.Mocker() - def test_update_advocate(self, m): - - m.post(self.cc.client.uri + "advocates") - - # Test arg validation - sms opt in requires a phone - self.assertRaises( - ValueError, lambda: self.cc.update_advocate(advocate_id=1, sms_optin=True) - ) - - # Test arg validation - email opt in requires a email - self.assertRaises( - ValueError, lambda: self.cc.update_advocate(advocate_id=1, email_optin=True) - ) - - # Test a successful call - self.cc.update_advocate( - advocate_id=1, - campaigns=[1], - email="foo@bar.com", - email_optin=True, - firstname="Test", - ) - self.assertTrue(m.called) - - # Check that the properties were mapped - data = parse_request_body(m.last_request.text) - self.assertEqual(data["firstname"], "Test") - self.assertNotIn("lastname", data) - self.assertEqual(data["emailOptin"], "1") - self.assertEqual(data["email"], "foo%40bar.com") diff --git a/test/test_civis.py b/test/test_civis.py index 1eed0e7340..66f90f164a 100644 --- a/test/test_civis.py +++ b/test/test_civis.py @@ -1,14 +1,14 @@ import unittest import os -from parsons import CivisClient, Table +from parsons.civis import CivisClient +from parsons.etl.table import Table # from . 
import scratch_creds -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestCivisClient(unittest.TestCase): + def setUp(self): self.civis = CivisClient() @@ -19,7 +19,7 @@ def setUp(self): create schema test_parsons; """ - self.lst_dicts = [{"first": "Bob", "last": "Smith"}] + self.lst_dicts = [{'first': 'Bob', 'last': 'Smith'}] self.tbl = Table(self.lst_dicts) self.civis.query(setup_sql) @@ -36,16 +36,16 @@ def tearDown(self): def test_table_import_query(self): # Test that a good table imports correctly - self.civis.table_import(self.tbl, "test_parsons.test_table") + self.civis.table_import(self.tbl, 'test_parsons.test_table') def test_query(self): # Test that queries match - self.civis.table_import(self.tbl, "test_parsons.test_table") + self.civis.table_import(self.tbl, 'test_parsons.test_table') tbl = self.civis.query("SELECT COUNT(*) FROM test_parsons.test_table") - self.assertEqual(tbl[0]["count"], "1") + self.assertEqual(tbl[0]['count'], '1') def test_to_civis(self): # Test that the to_civis() method works too - self.tbl.to_civis("test_parsons.test_table") + self.tbl.to_civis('test_parsons.test_table') diff --git a/test/test_controlshift/test_controlshift.py b/test/test_controlshift/test_controlshift.py index 47cffd9c69..a6973d64db 100644 --- a/test/test_controlshift/test_controlshift.py +++ b/test/test_controlshift/test_controlshift.py @@ -1,12 +1,13 @@ import requests_mock from unittest import TestCase from test.utils import mark_live_test, validate_list -from parsons import Controlshift +from parsons.controlshift.controlshift import Controlshift from test.test_controlshift import test_cs_data as test_data # type: ignore @mark_live_test class TestControlshiftLive(TestCase): + def test_get_live_petitions(self): cs = Controlshift() tbl = cs.get_petitions() @@ -14,17 +15,22 @@ def test_get_live_petitions(self): class TestControlshiftMock(TestCase): + def setUp(self): - self.hostname = "https://test.example.com" + self.hostname = 'https://test.example.com' @requests_mock.Mocker() def test_get_petitions(self, m): - m.post(f"{self.hostname}/oauth/token", json={"access_token": "123"}) + m.post( + f'{self.hostname}/oauth/token', + json={'access_token': '123'}) cs = Controlshift( - hostname=self.hostname, client_id="1234", client_secret="1234" + hostname=self.hostname, + client_id='1234', + client_secret='1234' ) - m.get(f"{self.hostname}/api/v1/petitions", json=test_data.petition_test_data) + m.get(f'{self.hostname}/api/v1/petitions', json=test_data.petition_test_data) tbl = cs.get_petitions() self.assertTrue(validate_list(test_data.expected_petition_columns, tbl)) diff --git a/test/test_controlshift/test_cs_data.py b/test/test_controlshift/test_cs_data.py index 24cc0cfecf..57351590f8 100644 --- a/test/test_controlshift/test_cs_data.py +++ b/test/test_controlshift/test_cs_data.py @@ -1,125 +1,63 @@ petition_test_data = { - "petitions": [ - { - "admin_events_status": "auto", - "admin_status": "unreviewed", - "alias": None, - "campaigner_contactable": True, - "can_download_signers": True, - "created_at": "2021-11-17T22:51:56Z", - "custom_goal": None, - "delivery_details": None, - "external_facebook_page": None, - "external_site": None, - "goal": 100, - "hide_petition_creator": None, - "hide_recent_signers": None, - "hide_signature_form": None, - "id": 536084, - "launched": True, - "locale": "en", - "petition_creator_name_override": None, - 
"redirect_to": None, - "show_progress_bar": True, - "signature_count_add_amount": None, - "slug": "test-1475", - "source": None, - "title": "Test", - "updated_at": "2021-11-17T22:51:58Z", - "what": "Test Petition", - "who": "Test Rep", - "why": "This is a test.", - "title_locked": None, - "who_locked": None, - "what_locked": None, - "why_locked": None, - "delivery_details_locked": None, - "external_facebook_page_locked": None, - "external_site_locked": None, - "categories_locked": None, - "url": "http://test.controlshift.app/petitions/test-1475", - "public_who": "Test Rep", - "ended": False, - "successful": False, - "image": None, - "public_signature_count": 1, - "admin_notes": [], - "creator": { - "full_name": "foo", - "first_name": "foo", - "last_name": "bar", - "email": "foo@bar.org", - "phone_number": None, - }, - "mentor": None, - "reviewer": None, - "location": None, - "decision_makers": [], - "effort": None, - "partnership": None, - "labels": [], - "categories": [], - } - ], + 'petitions': [{ + 'admin_events_status': 'auto', + 'admin_status': 'unreviewed', + 'alias': None, + 'campaigner_contactable': True, + 'can_download_signers': True, + 'created_at': '2021-11-17T22:51:56Z', + 'custom_goal': None, + 'delivery_details': None, + 'external_facebook_page': None, + 'external_site': None, 'goal': 100, + 'hide_petition_creator': None, + 'hide_recent_signers': None, + 'hide_signature_form': None, 'id': 536084, + 'launched': True, 'locale': 'en', + 'petition_creator_name_override': None, + 'redirect_to': None, 'show_progress_bar': True, + 'signature_count_add_amount': None, + 'slug': 'test-1475', 'source': None, 'title': 'Test', + 'updated_at': '2021-11-17T22:51:58Z', 'what': 'Test Petition', 'who': 'Test Rep', + 'why': 'This is a test.', + 'title_locked': None, + 'who_locked': None, 'what_locked': None, 'why_locked': None, + 'delivery_details_locked': None, + 'external_facebook_page_locked': None, + 'external_site_locked': None, + 'categories_locked': None, + 'url': 'http://test.controlshift.app/petitions/test-1475', + 'public_who': 'Test Rep', + 'ended': False, 'successful': False, + 'image': None, 'public_signature_count': 1, + 'admin_notes': [], + 'creator': { + 'full_name': 'foo', 'first_name': 'foo', 'last_name': + 'bar', 'email': 'foo@bar.org', + 'phone_number': None}, + 'mentor': None, + 'reviewer': None, 'location': None, + 'decision_makers': [], 'effort': None, + 'partnership': None, 'labels': [], + 'categories': [] + }], "meta": { "current_page": 1, "total_pages": 1, "previous_page": None, - "next_page": None, - }, + "next_page": None + } } expected_petition_columns = [ - "admin_events_status", - "admin_status", - "alias", - "campaigner_contactable", - "can_download_signers", - "created_at", - "custom_goal", - "delivery_details", - "external_facebook_page", - "external_site", - "goal", - "hide_petition_creator", - "hide_recent_signers", - "hide_signature_form", - "id", - "launched", - "locale", - "petition_creator_name_override", - "redirect_to", - "show_progress_bar", - "signature_count_add_amount", - "slug", - "source", - "title", - "updated_at", - "what", - "who", - "why", - "title_locked", - "who_locked", - "what_locked", - "why_locked", - "delivery_details_locked", - "external_facebook_page_locked", - "external_site_locked", - "categories_locked", - "url", - "public_who", - "ended", - "successful", - "image", - "public_signature_count", - "admin_notes", - "creator", - "mentor", - "reviewer", - "location", - "decision_makers", - "effort", - "partnership", - "labels", - 
"categories", -] + 'admin_events_status', 'admin_status', 'alias', 'campaigner_contactable', + 'can_download_signers', 'created_at', 'custom_goal', 'delivery_details', + 'external_facebook_page', 'external_site', 'goal', 'hide_petition_creator', + 'hide_recent_signers', 'hide_signature_form', 'id', 'launched', 'locale', + 'petition_creator_name_override', 'redirect_to', 'show_progress_bar', + 'signature_count_add_amount', 'slug', + 'source', 'title', 'updated_at', 'what', 'who', 'why', 'title_locked', 'who_locked', + 'what_locked', 'why_locked', 'delivery_details_locked', 'external_facebook_page_locked', + 'external_site_locked', 'categories_locked', 'url', 'public_who', 'ended', 'successful', + 'image', 'public_signature_count', 'admin_notes', 'creator', 'mentor', 'reviewer', + 'location', 'decision_makers', 'effort', 'partnership', 'labels', 'categories'] diff --git a/test/test_copper/test_copper.py b/test/test_copper/test_copper.py index 6c34d64b8a..223a2c149d 100644 --- a/test/test_copper/test_copper.py +++ b/test/test_copper/test_copper.py @@ -3,7 +3,8 @@ import json import requests_mock import sys -from parsons import Copper, Table +from parsons.copper import Copper +from parsons.etl import Table from test.utils import assert_matching_tables import logging @@ -12,7 +13,7 @@ # Log to the console strm_hdlr = logging.StreamHandler(sys.stdout) -strm_hdlr.setFormatter(logging.Formatter("%(message)s")) +strm_hdlr.setFormatter(logging.Formatter('%(message)s')) strm_hdlr.setLevel(logging.DEBUG) logger.addHandler(strm_hdlr) @@ -24,191 +25,95 @@ class TestCopper(unittest.TestCase): + def setUp(self): - self.cp = Copper("usr@losr.fake", "key") + self.cp = Copper('usr@losr.fake', 'key') # Using people as the most complicated object for test_get_standard_object() # Defined at self scope for use in test_get_people() - self.processed_people = Table( - [ - { - "id": 78757050, - "name": "Person One", - "prefix": None, - "first_name": "Person", - "middle_name": None, - "last_name": "One", - "suffix": None, - "assignee_id": None, - "company_id": 12030795, - "company_name": "Indivisible CityA", - "contact_type_id": 501950, - "details": None, - "tags": [], - "title": None, - "date_created": 1558169903, - "date_modified": 1558169910, - "date_last_contacted": 1558169891, - "interaction_count": 1, - "leads_converted_from": [], - "date_lead_created": None, - "address_city": "CityA", - "address_country": None, - "address_postal_code": "12345", - "address_state": "StateI", - "address_street": None, - }, - { - "id": 78477076, - "name": "Person Two", - "prefix": None, - "first_name": "Person", - "middle_name": None, - "last_name": "Two", - "suffix": None, - "assignee_id": 289533, - "company_id": 12096071, - "company_name": "Indivisible StateII", - "contact_type_id": 501950, - "details": None, - "tags": ["treasurer"], - "title": "Treasurer", - "date_created": 1557761054, - "date_modified": 1558218799, - "date_last_contacted": 1558196341, - "interaction_count": 14, - "leads_converted_from": [], - "date_lead_created": None, - "address_city": None, - "address_country": None, - "address_postal_code": None, - "address_state": None, - "address_street": None, - }, - { - "id": 78839154, - "name": "Person Three", - "prefix": None, - "first_name": "Person", - "middle_name": None, - "last_name": "Three", - "suffix": None, - "assignee_id": None, - "company_id": 34966944, - "company_name": "Flip StateIII", - "contact_type_id": 501950, - "details": None, - "tags": [], - "title": None, - "date_created": 1558223367, - 
"date_modified": 1558223494, - "date_last_contacted": 1558223356, - "interaction_count": 2, - "leads_converted_from": [], - "date_lead_created": None, - "address_city": "CityC", - "address_country": None, - "address_postal_code": "54321", - "address_state": "StateIII", - "address_street": None, - }, - ] - ) + self.processed_people = Table([ + { + 'id': 78757050, 'name': 'Person One', 'prefix': None, 'first_name': 'Person', + 'middle_name': None, 'last_name': 'One', 'suffix': None, 'assignee_id': None, + 'company_id': 12030795, 'company_name': 'Indivisible CityA', + 'contact_type_id': 501950, 'details': None, 'tags': [], 'title': None, + 'date_created': 1558169903, 'date_modified': 1558169910, + 'date_last_contacted': 1558169891, 'interaction_count': 1, + 'leads_converted_from': [], 'date_lead_created': None, 'address_city': 'CityA', + 'address_country': None, 'address_postal_code': '12345', + 'address_state': 'StateI', 'address_street': None, + }, { + 'id': 78477076, 'name': 'Person Two', 'prefix': None, 'first_name': 'Person', + 'middle_name': None, 'last_name': 'Two', 'suffix': None, 'assignee_id': 289533, + 'company_id': 12096071, 'company_name': 'Indivisible StateII', + 'contact_type_id': 501950, 'details': None, 'tags': ['treasurer'], + 'title': 'Treasurer', 'date_created': 1557761054, 'date_modified': 1558218799, + 'date_last_contacted': 1558196341, 'interaction_count': 14, + 'leads_converted_from': [], 'date_lead_created': None, 'address_city': None, + 'address_country': None, 'address_postal_code': None, 'address_state': None, + 'address_street': None + }, { + 'id': 78839154, 'name': 'Person Three', 'prefix': None, 'first_name': 'Person', + 'middle_name': None, 'last_name': 'Three', 'suffix': None, 'assignee_id': None, + 'company_id': 34966944, 'company_name': 'Flip StateIII', 'contact_type_id': 501950, + 'details': None, 'tags': [], 'title': None, 'date_created': 1558223367, + 'date_modified': 1558223494, 'date_last_contacted': 1558223356, + 'interaction_count': 2, 'leads_converted_from': [], 'date_lead_created': None, + 'address_city': 'CityC', 'address_country': None, 'address_postal_code': '54321', + 'address_state': 'StateIII', 'address_street': None + } + ]) # Tables and table names for test_get_custom_fields() and test_process_custom_fields() self.custom_field_tables = {} self.custom_field_table_names = [ - "custom_fields", - "custom_fields_available", - "custom_fields_options", - ] - - self.custom_field_tables["custom_fields"] = Table( - [ - {"id": 101674, "name": "Event Date", "data_type": "Date"}, - {"id": 102127, "name": "Date Added", "data_type": "Date"}, - {"id": 109116, "name": "Local Group Subtype", "data_type": "Dropdown"}, - ] - ) - - self.custom_field_tables["custom_fields_available"] = Table( - [ - {"id": 101674, "available_on": "opportunity"}, - {"id": 102127, "available_on": "company"}, - {"id": 102127, "available_on": "person"}, - {"id": 109116, "available_on": "company"}, - ] - ) - - self.custom_field_tables["custom_fields_options"] = Table( - [ - { - "id": 109116, - "name": "Local Group Subtype", - "options_id": 140251, - "options_name": "Public (displayed in map)", - "options_rank": 0, - }, - { - "id": 109116, - "name": "Local Group Subtype", - "options_id": 140250, - "options_name": "New (Needs Processing)", - "options_rank": 4, - }, - { - "id": 109116, - "name": "Local Group Subtype", - "options_id": 140252, - "options_name": "Private (not on map)", - "options_rank": 1, - }, - { - "id": 109116, - "name": "Local Group Subtype", - "options_id": 140254, - 
"options_name": "National", - "options_rank": 5, - }, - { - "id": 109116, - "name": "Local Group Subtype", - "options_id": 140766, - "options_name": "Not following principles", - "options_rank": 3, - }, - { - "id": 109116, - "name": "Local Group Subtype", - "options_id": 140764, - "options_name": "International", - "options_rank": 6, - }, - { - "id": 109116, - "name": "Local Group Subtype", - "options_id": 141434, - "options_name": "Inactive", - "options_rank": 2, - }, - ] - ) + 'custom_fields', 'custom_fields_available', 'custom_fields_options'] + + self.custom_field_tables['custom_fields'] = Table([ + {'id': 101674, 'name': 'Event Date', 'data_type': 'Date'}, + {'id': 102127, 'name': 'Date Added', 'data_type': 'Date'}, + {'id': 109116, 'name': 'Local Group Subtype', 'data_type': 'Dropdown'} + ]) + + self.custom_field_tables['custom_fields_available'] = Table([ + {'id': 101674, 'available_on': 'opportunity'}, + {'id': 102127, 'available_on': 'company'}, + {'id': 102127, 'available_on': 'person'}, + {'id': 109116, 'available_on': 'company'} + ]) + + self.custom_field_tables['custom_fields_options'] = Table([ + {"id": 109116, "name": "Local Group Subtype", "options_id": 140251, + "options_name": "Public (displayed in map)", "options_rank": 0}, + {"id": 109116, "name": "Local Group Subtype", "options_id": 140250, + "options_name": "New (Needs Processing)", "options_rank": 4}, + {"id": 109116, "name": "Local Group Subtype", "options_id": 140252, + "options_name": "Private (not on map)", "options_rank": 1}, + {"id": 109116, "name": "Local Group Subtype", "options_id": 140254, + "options_name": "National", "options_rank": 5}, + {"id": 109116, "name": "Local Group Subtype", "options_id": 140766, + "options_name": "Not following principles", "options_rank": 3}, + {"id": 109116, "name": "Local Group Subtype", "options_id": 140764, + "options_name": "International", "options_rank": 6}, + {"id": 109116, "name": "Local Group Subtype", "options_id": 141434, + "options_name": "Inactive", "options_rank": 2} + ]) def test_init(self): - self.assertEqual(self.cp.user_email, "usr@losr.fake") - self.assertEqual(self.cp.api_key, "key") + self.assertEqual(self.cp.user_email, 'usr@losr.fake') + self.assertEqual(self.cp.api_key, 'key') @requests_mock.Mocker() def test_base_request(self, m): # Assert the fake_search dict is returned - m.post(self.cp.uri + "/people/search", json=fake_search) + m.post(self.cp.uri + '/people/search', json=fake_search) self.assertEqual( fake_search, - json.loads(self.cp.base_request("/people/search", req_type="POST").text), - ) + json.loads(self.cp.base_request('/people/search', req_type='POST').text) + ) def paginate_callback(self, request, context): # Internal method for simulating pagination @@ -221,18 +126,18 @@ def paginate_callback(self, request, context): row_finish = 100 else: pdict = json.loads(request.text) - page_number = pdict["page_number"] - 1 - page_size = pdict["page_size"] + page_number = pdict['page_number'] - 1 + page_size = pdict['page_size'] row_start = page_number * page_size row_finish = row_start + page_size - with open(f'{_dir}/{context.headers["filename"]}', "r") as json_file: + with open(f'{_dir}/{context.headers["filename"]}', 'r') as json_file: response = json.load(json_file) if isinstance(response, list): - context.headers["X-Pw-Total"] = str(len(response)) - return response[row_start:row_finish] + context.headers['X-Pw-Total'] = str(len(response)) + return response[row_start: row_finish] else: return response @@ -242,160 +147,137 @@ def 
test_paginate_request(self, m): # Anonymized real output with nested columns self.blob = [ { - "id": 78757050, - "name": "Person One", - "prefix": None, - "first_name": "Person", - "middle_name": None, - "last_name": "One", - "suffix": None, - "address": { - "street": None, - "city": "CityA", - "state": "StateI", - "postal_code": "12345", - "country": None, - }, - "assignee_id": None, - "company_id": 12030795, - "company_name": "Indivisible CityA", - "contact_type_id": 501950, - "details": None, - "emails": [{"email": "PersonOne@fakemail.nope", "category": "work"}], - "phone_numbers": [ - {"number": "(541) 555-9585", "category": "work"}, - {"number": "555-555-9585", "category": "work"}, - ], - "socials": [ - {"url": "https://gravatar.com/gravatar", "category": "gravatar"} - ], - "tags": [], - "title": None, - "websites": [ - {"url": "http://www.IndivisibleCityA.org", "category": None} - ], - "custom_fields": [ - {"custom_field_definition_id": 125880, "value": None}, - {"custom_field_definition_id": 107297, "value": None}, - {"custom_field_definition_id": 102127, "value": None}, - {"custom_field_definition_id": 135034, "value": None}, - {"custom_field_definition_id": 107298, "value": None}, - {"custom_field_definition_id": 108972, "value": None}, - {"custom_field_definition_id": 125881, "value": None}, - ], - "date_created": 1558169903, - "date_modified": 1558169910, - "date_last_contacted": 1558169891, - "interaction_count": 1, - "leads_converted_from": [], - "date_lead_created": None, - }, - { - "id": 78477076, - "name": "Person Two", - "prefix": None, - "first_name": "Person", - "middle_name": None, - "last_name": "Two", - "suffix": None, - "address": { - "street": None, - "city": None, - "state": None, - "postal_code": None, - "country": None, - }, - "assignee_id": 289533, - "company_id": 12096071, - "company_name": "Indivisible StateII", - "contact_type_id": 501950, - "details": None, - "emails": [{"email": "Personb23@gmail.com", "category": "work"}], - "phone_numbers": [{"number": "(908) 555-2941", "category": "work"}], - "socials": [], - "tags": ["treasurer"], - "title": "Treasurer", - "websites": [], - "custom_fields": [ - {"custom_field_definition_id": 125880, "value": None}, - {"custom_field_definition_id": 107297, "value": None}, - {"custom_field_definition_id": 102127, "value": None}, - {"custom_field_definition_id": 135034, "value": None}, - {"custom_field_definition_id": 107298, "value": None}, - {"custom_field_definition_id": 108972, "value": None}, - {"custom_field_definition_id": 125881, "value": None}, - ], - "date_created": 1557761054, - "date_modified": 1558218799, - "date_last_contacted": 1558196341, - "interaction_count": 14, - "leads_converted_from": [], - "date_lead_created": None, - }, - { - "id": 78839154, - "name": "Person Three", - "prefix": None, - "first_name": "Person", - "middle_name": None, - "last_name": "Three", - "suffix": None, - "address": { - "street": None, - "city": "CityC", - "state": "StateIII", - "postal_code": "54321", - "country": None, - }, - "assignee_id": None, - "company_id": 34966944, - "company_name": "Flip StateIII", - "contact_type_id": 501950, - "details": None, - "emails": [{"email": "Person.Three@fakemail.nope", "category": "work"}], - "phone_numbers": [{"number": "(619) 555-7883", "category": "work"}], - "socials": [ - {"url": "https://twitter.com/ThreePerson", "category": "twitter"}, - { - "url": "https://www.facebook.com/Person.n.Three", - "category": "facebook", - }, - {"url": "https://gravatar.com/PersonThree", "category": 
"gravatar"}, - ], - "tags": [], - "title": None, - "websites": [], - "custom_fields": [ - {"custom_field_definition_id": 125880, "value": None}, - {"custom_field_definition_id": 107297, "value": None}, - {"custom_field_definition_id": 102127, "value": None}, - {"custom_field_definition_id": 135034, "value": None}, - {"custom_field_definition_id": 107298, "value": None}, - {"custom_field_definition_id": 108972, "value": None}, - {"custom_field_definition_id": 125881, "value": None}, - ], - "date_created": 1558223367, - "date_modified": 1558223494, - "date_last_contacted": 1558223356, - "interaction_count": 2, - "leads_converted_from": [], - "date_lead_created": None, - }, + 'id': 78757050, + 'name': 'Person One', + 'prefix': None, + 'first_name': 'Person', + 'middle_name': None, + 'last_name': 'One', + 'suffix': None, + 'address': { + 'street': None, 'city': 'CityA', 'state': 'StateI', + 'postal_code': '12345', 'country': None}, + 'assignee_id': None, + 'company_id': 12030795, + 'company_name': 'Indivisible CityA', + 'contact_type_id': 501950, + 'details': None, + 'emails': [{'email': 'PersonOne@fakemail.nope', 'category': 'work'}], + 'phone_numbers': [ + {'number': '(541) 555-9585', 'category': 'work'}, + {'number': '555-555-9585', 'category': 'work'}], + 'socials': [{'url': 'https://gravatar.com/gravatar', 'category': 'gravatar'}], + 'tags': [], + 'title': None, + 'websites': [{'url': 'http://www.IndivisibleCityA.org', 'category': None}], + 'custom_fields': [ + {'custom_field_definition_id': 125880, 'value': None}, + {'custom_field_definition_id': 107297, 'value': None}, + {'custom_field_definition_id': 102127, 'value': None}, + {'custom_field_definition_id': 135034, 'value': None}, + {'custom_field_definition_id': 107298, 'value': None}, + {'custom_field_definition_id': 108972, 'value': None}, + {'custom_field_definition_id': 125881, 'value': None}], + 'date_created': 1558169903, + 'date_modified': 1558169910, + 'date_last_contacted': 1558169891, + 'interaction_count': 1, + 'leads_converted_from': [], + 'date_lead_created': None + }, { + 'id': 78477076, + 'name': 'Person Two', + 'prefix': None, + 'first_name': 'Person', + 'middle_name': None, + 'last_name': 'Two', + 'suffix': None, + 'address': { + 'street': None, 'city': None, 'state': None, + 'postal_code': None, 'country': None}, + 'assignee_id': 289533, + 'company_id': 12096071, + 'company_name': 'Indivisible StateII', + 'contact_type_id': 501950, + 'details': None, + 'emails': [{'email': 'Personb23@gmail.com', 'category': 'work'}], + 'phone_numbers': [{'number': '(908) 555-2941', 'category': 'work'}], + 'socials': [], + 'tags': ['treasurer'], + 'title': 'Treasurer', + 'websites': [], + 'custom_fields': [ + {'custom_field_definition_id': 125880, 'value': None}, + {'custom_field_definition_id': 107297, 'value': None}, + {'custom_field_definition_id': 102127, 'value': None}, + {'custom_field_definition_id': 135034, 'value': None}, + {'custom_field_definition_id': 107298, 'value': None}, + {'custom_field_definition_id': 108972, 'value': None}, + {'custom_field_definition_id': 125881, 'value': None}], + 'date_created': 1557761054, + 'date_modified': 1558218799, + 'date_last_contacted': 1558196341, + 'interaction_count': 14, + 'leads_converted_from': [], + 'date_lead_created': None + }, { + 'id': 78839154, + 'name': 'Person Three', + 'prefix': None, + 'first_name': 'Person', + 'middle_name': None, + 'last_name': 'Three', + 'suffix': None, + 'address': { + 'street': None, 'city': 'CityC', 'state': 'StateIII', + 'postal_code': '54321', 
'country': None}, + 'assignee_id': None, + 'company_id': 34966944, + 'company_name': 'Flip StateIII', + 'contact_type_id': 501950, + 'details': None, + 'emails': [{'email': 'Person.Three@fakemail.nope', 'category': 'work'}], + 'phone_numbers': [{'number': '(619) 555-7883', 'category': 'work'}], + 'socials': [ + {'url': 'https://twitter.com/ThreePerson', 'category': 'twitter'}, + {'url': 'https://www.facebook.com/Person.n.Three', 'category': 'facebook'}, + {'url': 'https://gravatar.com/PersonThree', 'category': 'gravatar'}], + 'tags': [], + 'title': None, + 'websites': [], + 'custom_fields': [ + {'custom_field_definition_id': 125880, 'value': None}, + {'custom_field_definition_id': 107297, 'value': None}, + {'custom_field_definition_id': 102127, 'value': None}, + {'custom_field_definition_id': 135034, 'value': None}, + {'custom_field_definition_id': 107298, 'value': None}, + {'custom_field_definition_id': 108972, 'value': None}, + {'custom_field_definition_id': 125881, 'value': None}], + 'date_created': 1558223367, + 'date_modified': 1558223494, + 'date_last_contacted': 1558223356, + 'interaction_count': 2, + 'leads_converted_from': [], + 'date_lead_created': None + } ] # Mock endpoints m.post( - self.cp.uri + "/people/search", + self.cp.uri + '/people/search', json=self.paginate_callback, - headers={"filename": "people_search.txt"}, - ) + headers={"filename": "people_search.txt"}) # self.assertTrue( assert_matching_tables( Table(self.blob), Table( - self.cp.paginate_request("/people/search", page_size=1, req_type="POST") - ), + self.cp.paginate_request( + '/people/search', + page_size=1, + req_type='POST' + ) + ) ) def test_process_json(self): @@ -403,173 +285,119 @@ def test_process_json(self): # Stress-testing combination of unpack methods with contrived table from hell fake_response = [ { - "id": 1, - "Simple List Col": ["one", "two", "three"], - "Mixed List Col": [None, 2, "three"], - "Spotty List Col": [1, 2, 3], - "Multidim List Col": [[1, 2], [None, "two"], []], - "Nested List Col": [ - {"A": 1, "B": "one"}, - {"A": 2, "B": "two"}, - {"A": 3, "B": "three"}, - ], - "Simple Dict Col": {"one": 1, "two": 2, "three": 3}, - "Nested Dict Col": {"A": 1, "B": ["two", 2], "C": [None, 3, "three"]}, - }, - { - "id": 2, - "Simple List Col": ["four", "five", "six"], - "Mixed List Col": ["four", None, 6], - "Spotty List Col": [], - "Multidim List Col": [[3, None], [], ["three", "four"]], - "Nested List Col": [ - {"A": 4, "B": "four"}, - {"A": 5, "B": "five"}, - {"A": 6, "B": "six"}, - ], - "Simple Dict Col": {"one": "I", "two": "II", "three": "III"}, - "Nested Dict Col": {"A": ["one"], "B": [], "C": 3}, - }, - { - "id": 3, - "Simple List Col": ["seven", "eight", "nine"], - "Mixed List Col": [7, "eight", None], - "Spotty List Col": None, - "Multidim List Col": [["five", 6], [None]], - "Nested List Col": [ - {"A": 7, "B": "seven"}, - {"A": 8, "B": "eight"}, - {"A": 9, "B": "nine"}, - ], - "Simple Dict Col": {"one": "x", "two": "xx", "three": "xxx"}, - "Nested Dict Col": {"A": None, "B": 2, "C": [None, 3, "three"]}, - }, + 'id': 1, 'Simple List Col': ['one', 'two', 'three'], + 'Mixed List Col': [None, 2, 'three'], 'Spotty List Col': [1, 2, 3], + 'Multidim List Col': [[1, 2], [None, 'two'], []], + 'Nested List Col': [ + {'A': 1, 'B': 'one'}, + {'A': 2, 'B': 'two'}, + {'A': 3, 'B': 'three'}], + 'Simple Dict Col': {'one': 1, 'two': 2, 'three': 3}, + 'Nested Dict Col': {'A': 1, 'B': ['two', 2], 'C': [None, 3, 'three']} + }, { + 'id': 2, 'Simple List Col': ['four', 'five', 'six'], + 'Mixed List Col': 
['four', None, 6], 'Spotty List Col': [], + 'Multidim List Col': [[3, None], [], ['three', 'four']], + 'Nested List Col': [ + {'A': 4, 'B': 'four'}, + {'A': 5, 'B': 'five'}, + {'A': 6, 'B': 'six'}], + 'Simple Dict Col': {'one': 'I', 'two': 'II', 'three': 'III'}, + 'Nested Dict Col': {'A': ['one'], 'B': [], 'C': 3}, + }, { + 'id': 3, 'Simple List Col': ['seven', 'eight', 'nine'], + 'Mixed List Col': [7, 'eight', None], 'Spotty List Col': None, + 'Multidim List Col': [['five', 6], [None]], + 'Nested List Col': [ + {'A': 7, 'B': 'seven'}, + {'A': 8, 'B': 'eight'}, + {'A': 9, 'B': 'nine'}], + 'Simple Dict Col': {'one': 'x', 'two': 'xx', 'three': 'xxx'}, + 'Nested Dict Col': {'A': None, 'B': 2, 'C': [None, 3, 'three']} + } ] fake_response_tables = {} - table_names = ["fake_Nested List Col", "fake"] - - fake_response_tables["fake_Nested List Col"] = Table( - [ - {"id": 1, "Nested List Col_A": 1, "Nested List Col_B": "one"}, - {"id": 1, "Nested List Col_A": 2, "Nested List Col_B": "two"}, - {"id": 1, "Nested List Col_A": 3, "Nested List Col_B": "three"}, - {"id": 2, "Nested List Col_A": 4, "Nested List Col_B": "four"}, - {"id": 2, "Nested List Col_A": 5, "Nested List Col_B": "five"}, - {"id": 2, "Nested List Col_A": 6, "Nested List Col_B": "six"}, - {"id": 3, "Nested List Col_A": 7, "Nested List Col_B": "seven"}, - {"id": 3, "Nested List Col_A": 8, "Nested List Col_B": "eight"}, - {"id": 3, "Nested List Col_A": 9, "Nested List Col_B": "nine"}, - ] - ) - - fake_response_tables["fake"] = Table( - [ - { - "id": 1, - "Simple List Col": ["one", "two", "three"], - "Mixed List Col": [None, 2, "three"], - "Spotty List Col": [1, 2, 3], - "Multidim List Col": [[1, 2], [None, "two"], []], - "Simple Dict Col_one": 1, - "Simple Dict Col_three": 3, - "Simple Dict Col_two": 2, - "Nested Dict Col_A": 1, - "Nested Dict Col_B": ["two", 2], - "Nested Dict Col_C": [None, 3, "three"], - }, - { - "id": 2, - "Simple List Col": ["four", "five", "six"], - "Mixed List Col": ["four", None, 6], - "Spotty List Col": [], - "Multidim List Col": [[3, None], [], ["three", "four"]], - "Simple Dict Col_one": "I", - "Simple Dict Col_three": "III", - "Simple Dict Col_two": "II", - "Nested Dict Col_A": ["one"], - "Nested Dict Col_B": [], - "Nested Dict Col_C": 3, - }, - { - "id": 3, - "Simple List Col": ["seven", "eight", "nine"], - "Mixed List Col": [7, "eight", None], - "Spotty List Col": [None], - "Multidim List Col": [["five", 6], [None]], - "Simple Dict Col_one": "x", - "Simple Dict Col_three": "xxx", - "Simple Dict Col_two": "xx", - "Nested Dict Col_A": None, - "Nested Dict Col_B": 2, - "Nested Dict Col_C": [None, 3, "three"], - }, - ] - ) - - fake_processed = self.cp.process_json(fake_response, "fake") - self.assertTrue([f["name"] for f in fake_processed] == table_names) + table_names = ['fake_Nested List Col', 'fake'] + + fake_response_tables['fake_Nested List Col'] = Table([ + {"id": 1, "Nested List Col_A": 1, "Nested List Col_B": "one"}, + {"id": 1, "Nested List Col_A": 2, "Nested List Col_B": "two"}, + {"id": 1, "Nested List Col_A": 3, "Nested List Col_B": "three"}, + {"id": 2, "Nested List Col_A": 4, "Nested List Col_B": "four"}, + {"id": 2, "Nested List Col_A": 5, "Nested List Col_B": "five"}, + {"id": 2, "Nested List Col_A": 6, "Nested List Col_B": "six"}, + {"id": 3, "Nested List Col_A": 7, "Nested List Col_B": "seven"}, + {"id": 3, "Nested List Col_A": 8, "Nested List Col_B": "eight"}, + {"id": 3, "Nested List Col_A": 9, "Nested List Col_B": "nine"} + ]) + + fake_response_tables['fake'] = Table([ + {"id": 1, "Simple 
List Col": ["one", "two", "three"], + "Mixed List Col": [None, 2, "three"], "Spotty List Col": [1, 2, 3], + "Multidim List Col": [[1, 2], [None, "two"], []], + "Simple Dict Col_one": 1, "Simple Dict Col_three": 3, + "Simple Dict Col_two": 2, "Nested Dict Col_A": 1, + "Nested Dict Col_B": ["two", 2], "Nested Dict Col_C": [None, 3, "three"]}, + {"id": 2, "Simple List Col": ["four", "five", "six"], + "Mixed List Col": ["four", None, 6], "Spotty List Col": [], + "Multidim List Col": [[3, None], [], ["three", "four"]], + "Simple Dict Col_one": "I", "Simple Dict Col_three": "III", + "Simple Dict Col_two": "II", "Nested Dict Col_A": ["one"], + "Nested Dict Col_B": [], "Nested Dict Col_C": 3}, + {"id": 3, "Simple List Col": ["seven", "eight", "nine"], + "Mixed List Col": [7, "eight", None], "Spotty List Col": [None], + "Multidim List Col": [["five", 6], [None]], "Simple Dict Col_one": "x", + "Simple Dict Col_three": "xxx", "Simple Dict Col_two": "xx", + "Nested Dict Col_A": None, "Nested Dict Col_B": 2, + "Nested Dict Col_C": [None, 3, "three"]} + ]) + + fake_processed = self.cp.process_json(fake_response, 'fake') + self.assertTrue([f['name'] for f in fake_processed] == table_names) for tbl in table_names: assert_matching_tables( - [f["tbl"] for f in fake_processed if f["name"] == tbl][0], - fake_response_tables[tbl], + [f['tbl'] for f in fake_processed if f['name'] == tbl][0], + fake_response_tables[tbl] ) - fake_tidy = self.cp.process_json(fake_response, "fake", tidy=0) - self.assertTrue(len(fake_tidy) == len(fake_response[0]) - 1) + fake_tidy = self.cp.process_json(fake_response, 'fake', tidy=0) + self.assertTrue(len(fake_tidy) == len(fake_response[0])-1) def test_process_custom_fields(self): # Using same json file and processed data in testing both process_ and get_ methods - with open(f"{_dir}/custom_fields_search.json", "r") as json_file: + with open(f'{_dir}/custom_fields_search.json', 'r') as json_file: fake_response = json.load(json_file) fake_processed = self.cp.process_custom_fields(fake_response) - self.assertTrue( - [f["name"] for f in fake_processed] == self.custom_field_table_names - ) + self.assertTrue([f['name'] for f in fake_processed] == self.custom_field_table_names) for tbl in self.custom_field_table_names: assert_matching_tables( - [f["tbl"] for f in fake_processed if f["name"] == tbl][0], - self.custom_field_tables[tbl], + [f['tbl'] for f in fake_processed if f['name'] == tbl][0], + self.custom_field_tables[tbl] ) @requests_mock.Mocker() def test_get_standard_object(self, m): - processed_people_emails = Table( - [ - { - "id": 78757050, - "emails_category": "work", - "emails_email": "PersonOne@fakemail.nope", - }, - { - "id": 78477076, - "emails_category": "work", - "emails_email": "Personb23@gmail.com", - }, - { - "id": 78839154, - "emails_category": "work", - "emails_email": "Person.Three@fakemail.nope", - }, - ] - ) + processed_people_emails = Table([ + {'id': 78757050, 'emails_category': 'work', 'emails_email': 'PersonOne@fakemail.nope'}, + {'id': 78477076, 'emails_category': 'work', 'emails_email': 'Personb23@gmail.com'}, + {'id': 78839154, 'emails_category': 'work', + 'emails_email': 'Person.Three@fakemail.nope'} + ]) m.post( - self.cp.uri + "/people/search", + self.cp.uri + '/people/search', json=self.paginate_callback, - headers={"filename": "people_search.txt"}, - ) + headers={"filename": "people_search.txt"}) # Object-specific get_ functions are just wrappers for get_standard_object() # So the following line is the only difference from test_get_people() 
processed_blob = self.cp.get_standard_object("people") - blob_people = [f for f in processed_blob if f["name"] == "people"][0]["tbl"] - blob_people_emails = [ - f for f in processed_blob if f["name"] == "people_emails" - ][0]["tbl"] + blob_people = [f for f in processed_blob if f['name'] == "people"][0]['tbl'] + blob_people_emails = [f for f in processed_blob if f['name'] == "people_emails"][0]['tbl'] assert_matching_tables(self.processed_people, blob_people) assert_matching_tables(processed_people_emails, blob_people_emails) @@ -577,36 +405,20 @@ def test_get_standard_object(self, m): @requests_mock.Mocker() def test_get_people(self, m): - processed_people_emails = Table( - [ - { - "id": 78757050, - "emails_category": "work", - "emails_email": "PersonOne@fakemail.nope", - }, - { - "id": 78477076, - "emails_category": "work", - "emails_email": "Personb23@gmail.com", - }, - { - "id": 78839154, - "emails_category": "work", - "emails_email": "Person.Three@fakemail.nope", - }, - ] - ) + processed_people_emails = Table([ + {'id': 78757050, 'emails_category': 'work', 'emails_email': 'PersonOne@fakemail.nope'}, + {'id': 78477076, 'emails_category': 'work', 'emails_email': 'Personb23@gmail.com'}, + {'id': 78839154, 'emails_category': 'work', + 'emails_email': 'Person.Three@fakemail.nope'} + ]) m.post( - self.cp.uri + "/people/search", + self.cp.uri + '/people/search', json=self.paginate_callback, - headers={"filename": "people_search.txt"}, - ) + headers={"filename": "people_search.txt"}) processed_blob = self.cp.get_people() - blob_people = [f for f in processed_blob if f["name"] == "people"][0]["tbl"] - blob_people_emails = [ - f for f in processed_blob if f["name"] == "people_emails" - ][0]["tbl"] + blob_people = [f for f in processed_blob if f['name'] == "people"][0]['tbl'] + blob_people_emails = [f for f in processed_blob if f['name'] == "people_emails"][0]['tbl'] # Actually testing get_standard_object() and process_json() # Dicts & simple lists are unpacked to columns on original table @@ -620,147 +432,133 @@ def test_get_people(self, m): @requests_mock.Mocker() def test_get_opportunities(self, m): - processed_opps = Table( - [ - { - "id": 14340759, - "name": "Company1", - "assignee_id": 659394, - "close_date": None, - "company_id": 29324143, - "company_name": "Company1", - "customer_source_id": None, - "details": None, - "loss_reason_id": None, - "pipeline_id": 489028, - "pipeline_stage_id": 2529569, - "primary_contact_id": 67747998, - "priority": "High", - "status": "Open", - "tags": ["opportunities import-1540158946352"], - "interaction_count": 0, - "monetary_unit": "USD", - "monetary_value": 100000.0, - "converted_unit": None, - "converted_value": None, - "win_probability": None, - "date_stage_changed": 1548866182, - "date_last_contacted": None, - "leads_converted_from": [], - "date_lead_created": None, - "date_created": 1540159060, - "date_modified": 1550858334, - }, - { - "id": 14161592, - "name": "Company2", - "assignee_id": 659394, - "close_date": "11/10/2018", - "company_id": 28729196, - "company_name": "Company2", - "customer_source_id": None, - "details": None, - "loss_reason_id": None, - "pipeline_id": 531482, - "pipeline_stage_id": 2607171, - "primary_contact_id": 67243374, - "priority": "High", - "status": "Open", - "tags": [], - "interaction_count": 36, - "monetary_unit": "USD", - "monetary_value": 77000.0, - "converted_unit": None, - "converted_value": None, - "win_probability": None, - "date_stage_changed": 1551191957, - "date_last_contacted": 1552339800, - 
"leads_converted_from": [], - "date_lead_created": None, - "date_created": 1539192375, - "date_modified": 1552340016, - }, - { - "id": 14286548, - "name": "Company3", - "assignee_id": 644608, - "close_date": "11/18/2018", - "company_id": 29492294, - "company_name": "Company3", - "customer_source_id": None, - "details": None, - "loss_reason_id": None, - "pipeline_id": 531482, - "pipeline_stage_id": 2482007, - "primary_contact_id": 67637400, - "priority": "None", - "status": "Open", - "tags": [], - "interaction_count": 19, - "monetary_unit": "USD", - "monetary_value": 150000.0, - "converted_unit": None, - "converted_value": None, - "win_probability": 0, - "date_stage_changed": 1539870749, - "date_last_contacted": 1555534313, - "leads_converted_from": [], - "date_lead_created": None, - "date_created": 1539870749, - "date_modified": 1555550658, - }, - ] - ) + processed_opps = Table([ + { + "id": 14340759, + "name": "Company1", + "assignee_id": 659394, + "close_date": None, + "company_id": 29324143, + "company_name": "Company1", + "customer_source_id": None, + "details": None, "loss_reason_id": None, + "pipeline_id": 489028, "pipeline_stage_id": 2529569, + "primary_contact_id": 67747998, + "priority": "High", + "status": "Open", + "tags": ["opportunities import-1540158946352"], + "interaction_count": 0, + "monetary_unit": "USD", + "monetary_value": 100000.0, + "converted_unit": None, + "converted_value": None, + "win_probability": None, + "date_stage_changed": 1548866182, + "date_last_contacted": None, + "leads_converted_from": [], + "date_lead_created": None, + "date_created": 1540159060, + "date_modified": 1550858334 + }, { + "id": 14161592, + "name": "Company2", + "assignee_id": 659394, + "close_date": "11/10/2018", + "company_id": 28729196, + "company_name": "Company2", + "customer_source_id": None, + "details": None, + "loss_reason_id": None, + "pipeline_id": 531482, + "pipeline_stage_id": 2607171, + "primary_contact_id": 67243374, + "priority": "High", + "status": "Open", + "tags": [], + "interaction_count": 36, + "monetary_unit": "USD", + "monetary_value": 77000.0, + "converted_unit": None, + "converted_value": None, + "win_probability": None, + "date_stage_changed": 1551191957, + "date_last_contacted": 1552339800, + "leads_converted_from": [], + "date_lead_created": None, + "date_created": 1539192375, + "date_modified": 1552340016 + }, { + "id": 14286548, + "name": "Company3", + "assignee_id": 644608, + "close_date": "11/18/2018", + "company_id": 29492294, + "company_name": "Company3", + "customer_source_id": None, + "details": None, + "loss_reason_id": None, + "pipeline_id": 531482, + "pipeline_stage_id": 2482007, + "primary_contact_id": 67637400, + "priority": "None", + "status": "Open", + "tags": [], + "interaction_count": 19, + "monetary_unit": "USD", + "monetary_value": 150000.0, + "converted_unit": None, + "converted_value": None, + "win_probability": 0, + "date_stage_changed": 1539870749, + "date_last_contacted": 1555534313, + "leads_converted_from": [], + "date_lead_created": None, + "date_created": 1539870749, "date_modified": 1555550658 + } + ]) - processed_opps_cf = Table( - [ - { - "id": 14340759, - "custom_fields_custom_field_definition_id": 272931, - "custom_fields_value": [], - }, - { - "id": 14340759, - "custom_fields_custom_field_definition_id": 272927, - "custom_fields_value": None, - }, - { - "id": 14161592, - "custom_fields_custom_field_definition_id": 272931, - "custom_fields_value": [], - }, - { - "id": 14161592, - "custom_fields_custom_field_definition_id": 
272927, - "custom_fields_value": None, - }, - { - "id": 14286548, - "custom_fields_custom_field_definition_id": 272931, - "custom_fields_value": [], - }, - { - "id": 14286548, - "custom_fields_custom_field_definition_id": 272927, - "custom_fields_value": None, - }, - ] - ) + processed_opps_cf = Table([ + { + "id": 14340759, + "custom_fields_custom_field_definition_id": 272931, + "custom_fields_value": [] + }, + { + "id": 14340759, + "custom_fields_custom_field_definition_id": 272927, + "custom_fields_value": None + }, + { + "id": 14161592, + "custom_fields_custom_field_definition_id": 272931, + "custom_fields_value": [] + }, + { + "id": 14161592, + "custom_fields_custom_field_definition_id": 272927, + "custom_fields_value": None + }, + { + "id": 14286548, + "custom_fields_custom_field_definition_id": 272931, + "custom_fields_value": [] + }, + { + "id": 14286548, + "custom_fields_custom_field_definition_id": 272927, + "custom_fields_value": None + } + ]) m.post( - self.cp.uri + "/opportunities/search", + self.cp.uri + '/opportunities/search', json=self.paginate_callback, - headers={"filename": "opportunities_search.json"}, - ) + headers={"filename": "opportunities_search.json"}) processed_blob = self.cp.get_opportunities() - blob_opps = [f for f in processed_blob if f["name"] == "opportunities"][0][ - "tbl" - ] - blob_opps_cf = [ - f for f in processed_blob if f["name"] == "opportunities_custom_fields" - ] - blob_opps_cf = blob_opps_cf[0]["tbl"] + blob_opps = [f for f in processed_blob if f['name'] == "opportunities"][0]['tbl'] + blob_opps_cf = [f for f in processed_blob if f['name'] == "opportunities_custom_fields"] + blob_opps_cf = blob_opps_cf[0]['tbl'] assert_matching_tables(processed_opps, blob_opps) assert_matching_tables(processed_opps_cf, blob_opps_cf) @@ -768,147 +566,133 @@ def test_get_opportunities(self, m): @requests_mock.Mocker() def test_get_opportunities2(self, m): - processed_opps = Table( - [ - { - "id": 14340759, - "name": "Company1", - "assignee_id": 659394, - "close_date": None, - "company_id": 29324143, - "company_name": "Company1", - "customer_source_id": None, - "details": None, - "loss_reason_id": None, - "pipeline_id": 489028, - "pipeline_stage_id": 2529569, - "primary_contact_id": 67747998, - "priority": "High", - "status": "Open", - "tags": ["opportunities import-1540158946352"], - "interaction_count": 0, - "monetary_unit": "USD", - "monetary_value": 100000.0, - "converted_unit": None, - "converted_value": None, - "win_probability": None, - "date_stage_changed": 1548866182, - "date_last_contacted": None, - "leads_converted_from": [], - "date_lead_created": None, - "date_created": 1540159060, - "date_modified": 1550858334, - }, - { - "id": 14161592, - "name": "Company2", - "assignee_id": 659394, - "close_date": "11/10/2018", - "company_id": 28729196, - "company_name": "Company2", - "customer_source_id": None, - "details": None, - "loss_reason_id": None, - "pipeline_id": 531482, - "pipeline_stage_id": 2607171, - "primary_contact_id": 67243374, - "priority": "High", - "status": "Open", - "tags": [], - "interaction_count": 36, - "monetary_unit": "USD", - "monetary_value": 77000.0, - "converted_unit": None, - "converted_value": None, - "win_probability": None, - "date_stage_changed": 1551191957, - "date_last_contacted": 1552339800, - "leads_converted_from": [], - "date_lead_created": None, - "date_created": 1539192375, - "date_modified": 1552340016, - }, - { - "id": 14286548, - "name": "Company3", - "assignee_id": 644608, - "close_date": "11/18/2018", - "company_id": 
29492294, - "company_name": "Company3", - "customer_source_id": None, - "details": None, - "loss_reason_id": None, - "pipeline_id": 531482, - "pipeline_stage_id": 2482007, - "primary_contact_id": 67637400, - "priority": "None", - "status": "Open", - "tags": [], - "interaction_count": 19, - "monetary_unit": "USD", - "monetary_value": 150000.0, - "converted_unit": None, - "converted_value": None, - "win_probability": 0, - "date_stage_changed": 1539870749, - "date_last_contacted": 1555534313, - "leads_converted_from": [], - "date_lead_created": None, - "date_created": 1539870749, - "date_modified": 1555550658, - }, - ] - ) + processed_opps = Table([ + { + "id": 14340759, + "name": "Company1", + "assignee_id": 659394, + "close_date": None, + "company_id": 29324143, + "company_name": "Company1", + "customer_source_id": None, + "details": None, "loss_reason_id": None, + "pipeline_id": 489028, "pipeline_stage_id": 2529569, + "primary_contact_id": 67747998, + "priority": "High", + "status": "Open", + "tags": ["opportunities import-1540158946352"], + "interaction_count": 0, + "monetary_unit": "USD", + "monetary_value": 100000.0, + "converted_unit": None, + "converted_value": None, + "win_probability": None, + "date_stage_changed": 1548866182, + "date_last_contacted": None, + "leads_converted_from": [], + "date_lead_created": None, + "date_created": 1540159060, + "date_modified": 1550858334 + }, { + "id": 14161592, + "name": "Company2", + "assignee_id": 659394, + "close_date": "11/10/2018", + "company_id": 28729196, + "company_name": "Company2", + "customer_source_id": None, + "details": None, + "loss_reason_id": None, + "pipeline_id": 531482, + "pipeline_stage_id": 2607171, + "primary_contact_id": 67243374, + "priority": "High", + "status": "Open", + "tags": [], + "interaction_count": 36, + "monetary_unit": "USD", + "monetary_value": 77000.0, + "converted_unit": None, + "converted_value": None, + "win_probability": None, + "date_stage_changed": 1551191957, + "date_last_contacted": 1552339800, + "leads_converted_from": [], + "date_lead_created": None, + "date_created": 1539192375, + "date_modified": 1552340016 + }, { + "id": 14286548, + "name": "Company3", + "assignee_id": 644608, + "close_date": "11/18/2018", + "company_id": 29492294, + "company_name": "Company3", + "customer_source_id": None, + "details": None, + "loss_reason_id": None, + "pipeline_id": 531482, + "pipeline_stage_id": 2482007, + "primary_contact_id": 67637400, + "priority": "None", + "status": "Open", + "tags": [], + "interaction_count": 19, + "monetary_unit": "USD", + "monetary_value": 150000.0, + "converted_unit": None, + "converted_value": None, + "win_probability": 0, + "date_stage_changed": 1539870749, + "date_last_contacted": 1555534313, + "leads_converted_from": [], + "date_lead_created": None, + "date_created": 1539870749, "date_modified": 1555550658 + } + ]) - processed_opps_cf = Table( - [ - { - "id": 14340759, - "custom_fields_custom_field_definition_id": 272931, - "custom_fields_value": [], - }, - { - "id": 14340759, - "custom_fields_custom_field_definition_id": 272927, - "custom_fields_value": None, - }, - { - "id": 14161592, - "custom_fields_custom_field_definition_id": 272931, - "custom_fields_value": [], - }, - { - "id": 14161592, - "custom_fields_custom_field_definition_id": 272927, - "custom_fields_value": None, - }, - { - "id": 14286548, - "custom_fields_custom_field_definition_id": 272931, - "custom_fields_value": [], - }, - { - "id": 14286548, - "custom_fields_custom_field_definition_id": 272927, - 
"custom_fields_value": None, - }, - ] - ) + processed_opps_cf = Table([ + { + "id": 14340759, + "custom_fields_custom_field_definition_id": 272931, + "custom_fields_value": [] + }, + { + "id": 14340759, + "custom_fields_custom_field_definition_id": 272927, + "custom_fields_value": None + }, + { + "id": 14161592, + "custom_fields_custom_field_definition_id": 272931, + "custom_fields_value": [] + }, + { + "id": 14161592, + "custom_fields_custom_field_definition_id": 272927, + "custom_fields_value": None + }, + { + "id": 14286548, + "custom_fields_custom_field_definition_id": 272931, + "custom_fields_value": [] + }, + { + "id": 14286548, + "custom_fields_custom_field_definition_id": 272927, + "custom_fields_value": None + } + ]) m.post( - self.cp.uri + "/opportunities/search", + self.cp.uri + '/opportunities/search', json=self.paginate_callback, - headers={"filename": "opportunities_search.json"}, - ) + headers={"filename": "opportunities_search.json"}) processed_blob = self.cp.get_opportunities() - blob_opps = [f for f in processed_blob if f["name"] == "opportunities"][0][ - "tbl" - ] - blob_opps_cf = [ - f for f in processed_blob if f["name"] == "opportunities_custom_fields" - ] - blob_opps_cf = blob_opps_cf[0]["tbl"] + blob_opps = [f for f in processed_blob if f['name'] == "opportunities"][0]['tbl'] + blob_opps_cf = [f for f in processed_blob if f['name'] == "opportunities_custom_fields"] + blob_opps_cf = blob_opps_cf[0]['tbl'] assert_matching_tables(processed_opps, blob_opps) assert_matching_tables(processed_opps_cf, blob_opps_cf) @@ -916,119 +700,54 @@ def test_get_opportunities2(self, m): @requests_mock.Mocker() def test_get_companies(self, m): - processed_companies = Table( - [ - { - "id": 35015567, - "name": "Company One", - "assignee_id": None, - "contact_type_id": 547508, - "details": None, - "email_domain": "companyone@fake.nope", - "tags": [], - "interaction_count": 1, - "date_created": 1558441519, - "date_modified": 1558441535, - "address_city": "CityA", - "address_country": None, - "address_postal_code": "12345", - "address_state": "New York", - "address_street": None, - }, - { - "id": 35026533, - "name": "Company Two", - "assignee_id": None, - "contact_type_id": 547508, - "details": None, - "email_domain": "companytwo@fake.nope", - "tags": [], - "interaction_count": 1, - "date_created": 1558452953, - "date_modified": 1558452967, - "address_city": "CityB", - "address_country": None, - "address_postal_code": "23451", - "address_state": "New York", - "address_street": None, - }, - { - "id": 35014973, - "name": "Company Three", - "assignee_id": None, - "contact_type_id": 547508, - "details": None, - "email_domain": None, - "tags": [], - "interaction_count": 1, - "date_created": 1558434147, - "date_modified": 1558458137, - "address_city": None, - "address_country": None, - "address_postal_code": "34512", - "address_state": "Alabama", - "address_street": None, - }, - { - "id": 35029116, - "name": "Company Four", - "assignee_id": None, - "contact_type_id": 547508, - "details": None, - "email_domain": "companyfour@fake.nope", - "tags": [], - "interaction_count": 0, - "date_created": 1558461301, - "date_modified": 1558461301, - "address_city": "CityD ", - "address_country": None, - "address_postal_code": "45123", - "address_state": "California", - "address_street": None, - }, - { - "id": 35082308, - "name": "Company Five", - "assignee_id": None, - "contact_type_id": 547508, - "details": None, - "email_domain": "companyfive@fake.nope", - "tags": [], - "interaction_count": 1, - 
"date_created": 1558639445, - "date_modified": 1558639459, - "address_city": "CityE", - "address_country": None, - "address_postal_code": "51234", - "address_state": "Arizona", - "address_street": None, - }, - ] - ) - - processed_companies_phones = Table( - [ - { - "id": 35082308, - "phone_numbers_category": "work", - "phone_numbers_number": "123-555-9876", - } - ] - ) + processed_companies = Table([ + { + 'id': 35015567, 'name': 'Company One', 'assignee_id': None, + 'contact_type_id': 547508, 'details': None, 'email_domain': 'companyone@fake.nope', + 'tags': [], 'interaction_count': 1, 'date_created': 1558441519, + 'date_modified': 1558441535, 'address_city': 'CityA', 'address_country': None, + 'address_postal_code': '12345', 'address_state': 'New York', 'address_street': None, + }, { + 'id': 35026533, 'name': 'Company Two', 'assignee_id': None, + 'contact_type_id': 547508, 'details': None, 'email_domain': 'companytwo@fake.nope', + 'tags': [], 'interaction_count': 1, 'date_created': 1558452953, + 'date_modified': 1558452967, 'address_city': 'CityB', 'address_country': None, + 'address_postal_code': '23451', 'address_state': 'New York', 'address_street': None, + }, { + 'id': 35014973, 'name': 'Company Three', 'assignee_id': None, + 'contact_type_id': 547508, 'details': None, 'email_domain': None, 'tags': [], + 'interaction_count': 1, 'date_created': 1558434147, 'date_modified': 1558458137, + 'address_city': None, 'address_country': None, 'address_postal_code': '34512', + 'address_state': 'Alabama', 'address_street': None, + }, { + 'id': 35029116, 'name': 'Company Four', 'assignee_id': None, + 'contact_type_id': 547508, 'details': None, 'email_domain': 'companyfour@fake.nope', + 'tags': [], 'interaction_count': 0, 'date_created': 1558461301, + 'date_modified': 1558461301, 'address_city': 'CityD ', 'address_country': None, + 'address_postal_code': '45123', 'address_state': 'California', + 'address_street': None + }, { + 'id': 35082308, 'name': 'Company Five', 'assignee_id': None, + 'contact_type_id': 547508, 'details': None, 'email_domain': 'companyfive@fake.nope', + 'tags': [], 'interaction_count': 1, 'date_created': 1558639445, + 'date_modified': 1558639459, 'address_city': 'CityE', 'address_country': None, + 'address_postal_code': '51234', 'address_state': 'Arizona', 'address_street': None + } + ]) + + processed_companies_phones = Table([{ + 'id': 35082308, 'phone_numbers_category': 'work', 'phone_numbers_number': '123-555-9876' + }]) m.post( - self.cp.uri + "/companies/search", + self.cp.uri + '/companies/search', json=self.paginate_callback, - headers={"filename": "companies_search.json"}, - ) + headers={"filename": "companies_search.json"}) processed_blob = self.cp.get_companies() - blob_companies = [f for f in processed_blob if f["name"] == "companies"][0][ - "tbl" - ] + blob_companies = [f for f in processed_blob if f['name'] == "companies"][0]['tbl'] blob_companies_phones = [ - f for f in processed_blob if f["name"] == "companies_phone_numbers" - ][0]["tbl"] + f for f in processed_blob if f['name'] == "companies_phone_numbers"][0]['tbl'] assert_matching_tables(processed_companies, blob_companies) assert_matching_tables(processed_companies_phones, blob_companies_phones) @@ -1036,62 +755,33 @@ def test_get_companies(self, m): @requests_mock.Mocker() def test_get_activities(self, m): - processed_activities = Table( - [ - { - "id": 5369412841, - "user_id": 289533, - "details": None, - "activity_date": 1554149472, - "old_value": None, - "new_value": None, - "date_created": 1554149472, - 
"date_modified": 1554149472, - "parent_id": 76469872, - "parent_type": "person", - "type_category": "system", - "type_id": 1, - }, - { - "id": 5223481640, - "user_id": 377343, - "details": None, - "activity_date": 1550789277, - "old_value": None, - "new_value": None, - "date_created": 1550789277, - "date_modified": 1550789277, - "parent_id": 28465522, - "parent_type": "person", - "type_category": "system", - "type_id": 1, - }, - { - "id": 5185524266, - "user_id": 703426, - "details": None, - "activity_date": 1549983210, - "old_value": None, - "new_value": None, - "date_created": 1549983210, - "date_modified": 1549983210, - "parent_id": 12035585, - "parent_type": "company", - "type_category": "system", - "type_id": 1, - }, - ] - ) + processed_activities = Table([ + { + 'id': 5369412841, 'user_id': 289533, 'details': None, 'activity_date': 1554149472, + 'old_value': None, 'new_value': None, 'date_created': 1554149472, + 'date_modified': 1554149472, 'parent_id': 76469872, 'parent_type': 'person', + 'type_category': 'system', 'type_id': 1 + }, { + 'id': 5223481640, 'user_id': 377343, 'details': None, 'activity_date': 1550789277, + 'old_value': None, 'new_value': None, 'date_created': 1550789277, + 'date_modified': 1550789277, 'parent_id': 28465522, 'parent_type': 'person', + 'type_category': 'system', 'type_id': 1 + }, { + 'id': 5185524266, 'user_id': 703426, 'details': None, 'activity_date': 1549983210, + 'old_value': None, 'new_value': None, 'date_created': 1549983210, + 'date_modified': 1549983210, 'parent_id': 12035585, 'parent_type': 'company', + 'type_category': 'system', 'type_id': 1 + } + ]) m.post( - self.cp.uri + "/activities/search", + self.cp.uri + '/activities/search', json=self.paginate_callback, - headers={"filename": "activities_search.json"}, - ) + headers={"filename": "activities_search.json"}) processed_blob = self.cp.get_activities() # No nested columns in Actvities - blob_activities = processed_blob[0]["tbl"] + blob_activities = processed_blob[0]['tbl'] assert_matching_tables(processed_activities, blob_activities) @@ -1099,108 +789,69 @@ def test_get_activities(self, m): def test_get_custom_fields(self, m): m.get( - self.cp.uri + "/custom_field_definitions/", + self.cp.uri + '/custom_field_definitions/', json=self.paginate_callback, - headers={"filename": "custom_fields_search.json"}, - ) + headers={"filename": "custom_fields_search.json"}) processed_blob = self.cp.get_custom_fields() - self.assertTrue( - [f["name"] for f in processed_blob] == self.custom_field_table_names - ) + self.assertTrue([f['name'] for f in processed_blob] == self.custom_field_table_names) for tbl in self.custom_field_table_names: assert_matching_tables( - [f["tbl"] for f in processed_blob if f["name"] == tbl][0], - self.custom_field_tables[tbl], + [f['tbl'] for f in processed_blob if f['name'] == tbl][0], + self.custom_field_tables[tbl] ) @requests_mock.Mocker() def test_get_activity_types(self, m): - processed_at = Table( - [ - { - "category": "system", - "count_as_interaction": False, - "id": 1, - "is_disabled": False, - "name": "Property Changed", - }, - { - "category": "system", - "count_as_interaction": False, - "id": 3, - "is_disabled": False, - "name": "Pipeline Stage Changed", - }, - { - "category": "user", - "count_as_interaction": False, - "id": 0, - "is_disabled": False, - "name": "Note", - }, - { - "category": "user", - "count_as_interaction": True, - "id": 504464, - "is_disabled": False, - "name": "Mail", - }, - { - "category": "user", - "count_as_interaction": True, - "id": 248465, - 
"is_disabled": False, - "name": "Stories from the Field", - }, - { - "category": "user", - "count_as_interaction": True, - "id": 236962, - "is_disabled": False, - "name": "Press Coverage", - }, - ] - ) + processed_at = Table([ + {"category": "system", "count_as_interaction": False, "id": 1, "is_disabled": False, + "name": "Property Changed"}, + {"category": "system", "count_as_interaction": False, "id": 3, "is_disabled": False, + "name": "Pipeline Stage Changed"}, + {"category": "user", "count_as_interaction": False, "id": 0, "is_disabled": False, + "name": "Note"}, + {"category": "user", "count_as_interaction": True, "id": 504464, "is_disabled": False, + "name": "Mail"}, + {"category": "user", "count_as_interaction": True, "id": 248465, "is_disabled": False, + "name": "Stories from the Field"}, + {"category": "user", "count_as_interaction": True, "id": 236962, "is_disabled": False, + "name": "Press Coverage"} + ]) m.get( - self.cp.uri + "/activity_types/", + self.cp.uri + '/activity_types/', json=self.paginate_callback, - headers={"filename": "activity_types_list.json"}, - ) + headers={"filename": "activity_types_list.json"}) processed_blob = self.cp.get_activity_types() # No nested columns in Activity Types - blob_at = processed_blob[0]["tbl"] + blob_at = processed_blob[0]['tbl'] assert_matching_tables(processed_at, blob_at) @requests_mock.Mocker() def test_get_contact_types(self, m): - processed_ct = Table( - [ - {"id": 501947, "name": "Potential Customer"}, - {"id": 501948, "name": "Current Customer"}, - {"id": 501949, "name": "Uncategorized"}, - {"id": 501950, "name": "Group Leader"}, - {"id": 540331, "name": "Partner"}, - {"id": 540333, "name": "Funder"}, - {"id": 540334, "name": "Potential Funder"}, - {"id": 540335, "name": "Other"}, - {"id": 547508, "name": "Local Group"}, - {"id": 575833, "name": "Group Member"}, - {"id": 744795, "name": "Hill Contact"}, - {"id": 967249, "name": "State Leg Contact"}, - ] - ) + processed_ct = Table([ + {'id': 501947, 'name': 'Potential Customer'}, + {'id': 501948, 'name': 'Current Customer'}, + {'id': 501949, 'name': 'Uncategorized'}, + {'id': 501950, 'name': 'Group Leader'}, + {'id': 540331, 'name': 'Partner'}, + {'id': 540333, 'name': 'Funder'}, + {'id': 540334, 'name': 'Potential Funder'}, + {'id': 540335, 'name': 'Other'}, + {'id': 547508, 'name': 'Local Group'}, + {'id': 575833, 'name': 'Group Member'}, + {'id': 744795, 'name': 'Hill Contact'}, + {'id': 967249, 'name': 'State Leg Contact'} + ]) m.get( - self.cp.uri + "/contact_types/", + self.cp.uri + '/contact_types/', json=self.paginate_callback, - headers={"filename": "contact_types_list.json"}, - ) + headers={"filename": "contact_types_list.json"}) processed_blob = self.cp.get_contact_types() assert_matching_tables(processed_ct, processed_blob) diff --git a/test/test_credential_tools.py b/test/test_credential_tools.py index 6c59b46a68..f25072955a 100644 --- a/test/test_credential_tools.py +++ b/test/test_credential_tools.py @@ -6,17 +6,20 @@ class TestCredentialTool(unittest.TestCase): + def setUp(self): - os.environ["TES_VAR1"] = "variable1" - os.environ["TES_VAR2"] = "variable2" + os.environ['TES_VAR1'] = 'variable1' + os.environ['TES_VAR2'] = 'variable2' self.tmp_folder = "tmp" self.json_file = "credentials.json" os.mkdir(self.tmp_folder) - with open(f"{self.tmp_folder}/{self.json_file}", "w") as f: - f.write(json.dumps({"json": "file"})) + with open(f"{self.tmp_folder}/{self.json_file}", 'w') as f: + f.write(json.dumps({ + "json": "file" + })) def tearDown(self): @@ -24,22 +27,23 @@ 
def tearDown(self): shutil.rmtree(self.tmp_folder) def test_decode_credential(self): - encoded_cred = ( - "PRSNSENVeyJFTkNfVkFSMSI6ICJlbmNvZGVkLXZhcmlhYmxl" - "LTEiLCAiRU5DX1ZBUjIiOiAiZW5jLXZhci0yIn0=" - ) + encoded_cred = ("PRSNSENVeyJFTkNfVkFSMSI6ICJlbmNvZGVkLXZhcmlhYmxl" + "LTEiLCAiRU5DX1ZBUjIiOiAiZW5jLXZhci0yIn0=") - expected = {"ENC_VAR1": "encoded-variable-1", "ENC_VAR2": "enc-var-2"} + expected = { + "ENC_VAR1": "encoded-variable-1", + "ENC_VAR2": "enc-var-2"} - self.assertDictEqual(ct.decode_credential(encoded_cred, export=False), expected) + self.assertDictEqual( + ct.decode_credential(encoded_cred, export=False), expected) def test_decode_credential_export(self): - encoded_cred = ( - "PRSNSENVeyJFTkNfVkFSMSI6ICJlbmNvZGVkLXZhcmlhYmxl" - "LTEiLCAiRU5DX1ZBUjIiOiAiZW5jLXZhci0yIn0=" - ) + encoded_cred = ("PRSNSENVeyJFTkNfVkFSMSI6ICJlbmNvZGVkLXZhcmlhYmxl" + "LTEiLCAiRU5DX1ZBUjIiOiAiZW5jLXZhci0yIn0=") - expected = {"ENC_VAR1": "encoded-variable-1", "ENC_VAR2": "enc-var-2"} + expected = { + "ENC_VAR1": "encoded-variable-1", + "ENC_VAR2": "enc-var-2"} self.assertNotIn("ENC_VAR1", os.environ) self.assertNotIn("ENC_VAR2", os.environ) @@ -49,16 +53,16 @@ def test_decode_credential_export(self): self.assertIn("ENC_VAR1", os.environ) self.assertIn("ENC_VAR2", os.environ) - self.assertEqual(os.environ["ENC_VAR1"], expected["ENC_VAR1"]) - self.assertEqual(os.environ["ENC_VAR2"], expected["ENC_VAR2"]) + self.assertEqual(os.environ['ENC_VAR1'], expected['ENC_VAR1']) + self.assertEqual(os.environ['ENC_VAR2'], expected['ENC_VAR2']) def test_decode_credential_save(self): - encoded_cred = ( - "PRSNSENVeyJFTkNfVkFSMSI6ICJlbmNvZGVkLXZhcmlhYmxl" - "LTEiLCAiRU5DX1ZBUjIiOiAiZW5jLXZhci0yIn0=" - ) + encoded_cred = ("PRSNSENVeyJFTkNfVkFSMSI6ICJlbmNvZGVkLXZhcmlhYmxl" + "LTEiLCAiRU5DX1ZBUjIiOiAiZW5jLXZhci0yIn0=") - expected = {"ENC_VAR1": "encoded-variable-1", "ENC_VAR2": "enc-var-2"} + expected = { + "ENC_VAR1": "encoded-variable-1", + "ENC_VAR2": "enc-var-2"} file_path = f"{self.tmp_folder}/saved_credentials.json" self.assertFalse(os.path.isfile(file_path)) @@ -67,7 +71,7 @@ def test_decode_credential_save(self): self.assertTrue(os.path.isfile(file_path)) - with open(file_path, "r") as f: + with open(file_path, 'r') as f: cred = json.load(f) self.assertDictEqual(cred, expected) @@ -87,14 +91,13 @@ def test_encode_from_json_file(self): json_path = f"{self.tmp_folder}/{self.json_file}" expected = "PRSNSENVeyJqc29uIjogImZpbGUifQ==" - self.assertEqual(ct.encode_from_json_file(json_path), expected) + self.assertEqual( + ct.encode_from_json_file(json_path), expected) def testencode_from_env(self): - lst = ["TES_VAR1", "TES_VAR2"] - expected = ( - "PRSNSENVeyJURVNfVkFSMSI6ICJ2YXJpYWJsZTEiLCAiVEVTX1ZBU" - "jIiOiAidmFyaWFibGUyIn0=" - ) + lst = ['TES_VAR1', 'TES_VAR2'] + expected = ("PRSNSENVeyJURVNfVkFSMSI6ICJ2YXJpYWJsZTEiLCAiVEVTX1ZBU" + "jIiOiAidmFyaWFibGUyIn0=") self.assertEqual(ct.encode_from_env(lst), expected) diff --git a/test/test_crowdtangle/leaderboard.py b/test/test_crowdtangle/leaderboard.py index 0b62bc651a..5b72405758 100644 --- a/test/test_crowdtangle/leaderboard.py +++ b/test/test_crowdtangle/leaderboard.py @@ -1,6737 +1,6937 @@ # flake8: noqa expected_leaderboard = { - "status": 200, - "result": { - "accountStatistics": [ - { - "account": { - "id": 6897, - "name": "Fox News", - "handle": "FoxNews", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22519337_10156158270486336_6810712156586627746_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlXNhWwb8bfCyDXwZo8N1dsslewpEwDTilUVrDkK4ie4qoq_SHj__a9Ws-O0Hsa97M&_nc_ht=scontent.xx&oh=485819e2e49151fcf033722359d3e1a7&oe=5DFF0F55", - "subscriberCount": 17163279, - "url": "https://www.facebook.com/15704546335", - "platform": "Facebook", - "platformId": "15704546335", - "verified": True, - }, - "summary": { - "loveCount": 47232, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 572261, - "wowCount": 22391, - "thankfulCount": 0, - "interactionRate": 0.0694641272581413, - "likeCount": 226852, - "hahaCount": 41859, - "commentCount": 84712, - "shareCount": 89770, - "sadCount": 15405, - "angryCount": 44040, - "totalVideoTimeMS": 914338, - "postCount": 48, - }, - "breakdown": { - "native_video": { - "loveCount": 1167, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 12545, - "wowCount": 852, - "thankfulCount": 0, - "interactionRate": 0.018272060315511752, - "likeCount": 6028, - "hahaCount": 731, - "commentCount": 903, - "shareCount": 2654, - "sadCount": 28, - "angryCount": 182, - "totalVideoTimeMS": 486605, - "postCount": 4, - }, - "owned_video": { - "loveCount": 1347, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 23723, - "wowCount": 916, - "thankfulCount": 0, - "interactionRate": 0.02303235153929144, - "likeCount": 7203, - "hahaCount": 3204, - "commentCount": 6396, - "shareCount": 2991, - "sadCount": 109, - "angryCount": 1557, - "totalVideoTimeMS": 486605, - "postCount": 6, - }, - "crosspost": { - "shareCount": 337, - "loveCount": 180, - "totalInteractionCount": 11178, - "wowCount": 64, - "sadCount": 81, - "angryCount": 1375, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.03256458708654183, - "likeCount": 1175, - "hahaCount": 2473, - "commentCount": 5493, - }, - "link": { - "shareCount": 59011, - "loveCount": 19647, - "totalInteractionCount": 299339, - "wowCount": 19416, - "sadCount": 10916, - "angryCount": 29091, - "thankfulCount": 0, - "postCount": 35, - "interactionRate": 0.049828654278780776, - "likeCount": 105531, - "hahaCount": 18405, - "commentCount": 37322, - }, - "photo": { - "shareCount": 27536, - "loveCount": 26159, - "totalInteractionCount": 247051, - "wowCount": 1991, - "sadCount": 4208, - "angryCount": 13375, - "thankfulCount": 0, - "postCount": 6, - "interactionRate": 0.23990818988877438, - "likeCount": 112630, - "hahaCount": 20239, - "commentCount": 40913, - }, - "share": { - "loveCount": 79, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 2148, - "wowCount": 68, - "thankfulCount": 0, - "interactionRate": 0.01251542906815027, - "likeCount": 1488, - "hahaCount": 11, - "commentCount": 81, - "shareCount": 232, - "sadCount": 172, - "angryCount": 17, - "totalVideoTimeMS": 427733, - "postCount": 1, - }, - }, - "subscriberData": {"initialCount": 17162352, "finalCount": 17163279}, - }, - { - "account": { - "id": 12431, - "name": "Occupy Democrats", - "handle": "OccupyDemocrats", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/45537032_2403174426442264_4326005542884474880_n.png?_nc_cat=1&_nc_oc=AQk1aVoZuS5iEgQ8jJOAnRArWGeM1nOF7Qx0IOdloEmSEgqG2JK08hhJzKOuJJ3fHcw&_nc_ht=scontent.xx&oh=1020f841a2c9e7ceddc491ff0d6a83dc&oe=5E051B61", - "subscriberCount": 7872091, - "url": "https://www.facebook.com/346937065399354", - "platform": "Facebook", - "platformId": "346937065399354", - "verified": True, - }, - "summary": { - "loveCount": 11586, - "threePlusMinuteVideoCount": 1, - 
"totalInteractionCount": 412395, - "wowCount": 2699, - "thankfulCount": 0, - "interactionRate": 0.17463810331280705, - "likeCount": 132321, - "hahaCount": 74689, - "commentCount": 27137, - "shareCount": 103448, - "sadCount": 37188, - "angryCount": 23327, - "totalVideoTimeMS": 446099, - "postCount": 30, - }, - "breakdown": { - "owned_video": { - "loveCount": 147, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 14936, - "wowCount": 241, - "thankfulCount": 0, - "interactionRate": 0.18975663546341384, - "likeCount": 1866, - "hahaCount": 3389, - "commentCount": 4775, - "shareCount": 2215, - "sadCount": 184, - "angryCount": 2119, - "totalVideoTimeMS": 446099, - "postCount": 1, - }, - "crosspost": { - "loveCount": 147, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 14936, - "wowCount": 241, - "thankfulCount": 0, - "interactionRate": 0.18975663546341384, - "likeCount": 1866, - "hahaCount": 3389, - "commentCount": 4775, - "shareCount": 2215, - "sadCount": 184, - "angryCount": 2119, - "totalVideoTimeMS": 446099, - "postCount": 1, - }, - "link": { - "shareCount": 4074, - "loveCount": 1045, - "totalInteractionCount": 24781, - "wowCount": 678, - "sadCount": 373, - "angryCount": 3896, - "thankfulCount": 0, - "postCount": 13, - "interactionRate": 0.024215060738702915, - "likeCount": 9224, - "hahaCount": 1646, - "commentCount": 3845, - }, - "photo": { - "shareCount": 97159, - "loveCount": 10394, - "totalInteractionCount": 372678, - "wowCount": 1780, - "sadCount": 36631, - "angryCount": 17312, - "thankfulCount": 0, - "postCount": 16, - "interactionRate": 0.29591668138817856, - "likeCount": 121231, - "hahaCount": 69654, - "commentCount": 18517, - }, - }, - "subscriberData": {"initialCount": 7870178, "finalCount": 7872091}, - }, - { - "account": { - "id": 8323, - "name": "CNN", - "handle": "cnn", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12289622_10154246192721509_1897912583584847639_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnmWKpivHkplQlHvH6RU7ER1noSOq6saypKUuDbSnV0FWNYEYghmJGPxBpmhJO8UsU&_nc_ht=scontent.xx&oh=12e2b35de35132a27c2772d3fe565936&oe=5DF3AC02", - "subscriberCount": 31389797, - "url": "https://www.facebook.com/5550296508", - "platform": "Facebook", - "platformId": "5550296508", - "verified": True, - }, - "summary": { - "loveCount": 40425, - "threePlusMinuteVideoCount": 19, - "totalInteractionCount": 343516, - "wowCount": 14626, - "thankfulCount": 0, - "interactionRate": 0.024317314415293154, - "likeCount": 146903, - "hahaCount": 15613, - "commentCount": 32704, - "shareCount": 65487, - "sadCount": 19242, - "angryCount": 8516, - "totalVideoTimeMS": 5541666, - "postCount": 45, - }, - "breakdown": { - "owned_video": { - "loveCount": 6286, - "threePlusMinuteVideoCount": 18, - "totalInteractionCount": 101691, - "wowCount": 3110, - "thankfulCount": 0, - "interactionRate": 0.01799666043914464, - "likeCount": 43832, - "hahaCount": 2764, - "commentCount": 13059, - "shareCount": 21216, - "sadCount": 8587, - "angryCount": 2837, - "totalVideoTimeMS": 5302033, - "postCount": 18, - }, - "crosspost": { - "loveCount": 6286, - "threePlusMinuteVideoCount": 18, - "totalInteractionCount": 101691, - "wowCount": 3110, - "thankfulCount": 0, - "interactionRate": 0.01799666043914464, - "likeCount": 43832, - "hahaCount": 2764, - "commentCount": 13059, - "shareCount": 21216, - "sadCount": 8587, - "angryCount": 2837, - "totalVideoTimeMS": 5302033, - "postCount": 18, - }, - "link": { - "shareCount": 44085, - "loveCount": 34111, - "totalInteractionCount": 240792, - "wowCount": 
11485, - "sadCount": 10649, - "angryCount": 5675, - "thankfulCount": 0, - "postCount": 25, - "interactionRate": 0.030682569780386266, - "likeCount": 102525, - "hahaCount": 12792, - "commentCount": 19470, - }, - "share": { - "loveCount": 28, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 1033, - "wowCount": 31, - "thankfulCount": 0, - "interactionRate": 0.0016438797639579806, - "likeCount": 546, - "hahaCount": 57, - "commentCount": 175, - "shareCount": 186, - "sadCount": 6, - "angryCount": 4, - "totalVideoTimeMS": 239633, - "postCount": 2, - }, - }, - "subscriberData": {"initialCount": 31388517, "finalCount": 31389797}, - }, - { - "account": { - "id": 6893, - "name": "Breitbart", - "handle": "Breitbart", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/52553630_10162546333950354_957315555642048512_n.jpg?_nc_cat=1&_nc_oc=AQk8nx3izP_SkwUd5mI2QxO4HjJhMpRt5ggDp60YEZ10o7ZdAbnqH-Zl61Z9gCXAUT8&_nc_ht=scontent.xx&oh=163e2b6e11a05fc602bdbfbe245a4a0e&oe=5E125439", - "subscriberCount": 4051767, - "url": "https://www.facebook.com/95475020353", - "platform": "Facebook", - "platformId": "95475020353", - "verified": True, - }, - "summary": { - "shareCount": 34233, - "loveCount": 2121, - "totalInteractionCount": 180913, - "wowCount": 6217, - "sadCount": 7025, - "angryCount": 35424, - "thankfulCount": 0, - "postCount": 44, - "interactionRate": 0.10146013999821069, - "likeCount": 33695, - "hahaCount": 21554, - "commentCount": 40644, - }, - "breakdown": { - "link": { - "shareCount": 32055, - "loveCount": 1158, - "totalInteractionCount": 168057, - "wowCount": 6134, - "sadCount": 7023, - "angryCount": 35416, - "thankfulCount": 0, - "postCount": 43, - "interactionRate": 0.0964500674076885, - "likeCount": 24757, - "hahaCount": 21518, - "commentCount": 39996, - }, - "photo": { - "shareCount": 2178, - "loveCount": 963, - "totalInteractionCount": 12856, - "wowCount": 83, - "sadCount": 2, - "angryCount": 8, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.31728814395937643, - "likeCount": 8938, - "hahaCount": 36, - "commentCount": 648, - }, - }, - "subscriberData": {"initialCount": 4051908, "finalCount": 4051767}, - }, - { - "account": { - "id": 7132, - "name": "The New York Times", - "handle": "nytimes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57", - "subscriberCount": 16854715, - "url": "https://www.facebook.com/5281959998", - "platform": "Facebook", - "platformId": "5281959998", - "verified": True, - }, - "summary": { - "shareCount": 17541, - "loveCount": 3889, - "totalInteractionCount": 102490, - "wowCount": 6687, - "sadCount": 18117, - "angryCount": 13956, - "thankfulCount": 0, - "postCount": 61, - "interactionRate": 0.009967688669212105, - "likeCount": 30490, - "hahaCount": 3317, - "commentCount": 8493, - }, - "breakdown": { - "link": { - "shareCount": 17541, - "loveCount": 3889, - "totalInteractionCount": 102490, - "wowCount": 6687, - "sadCount": 18117, - "angryCount": 13956, - "thankfulCount": 0, - "postCount": 61, - "interactionRate": 0.009967688669212105, - "likeCount": 30490, - "hahaCount": 3317, - "commentCount": 8493, - } - }, - "subscriberData": {"initialCount": 16854203, "finalCount": 16854715}, - }, - { - "account": { - "id": 6149, - "name": "NPR", - "handle": "NPR", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/392453_10150756268711756_1078337478_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkCimbrOrcgFhsAxAA1U5koNLGX9OLyOXdvEKxfRI0_6KYiFljw87Kls85nrj6clWA&_nc_ht=scontent.xx&oh=1883b0436c2dd854062b47c02250e87b&oe=5DF7D154", - "subscriberCount": 6596236, - "url": "https://www.facebook.com/10643211755", - "platform": "Facebook", - "platformId": "10643211755", - "verified": True, - }, - "summary": { - "shareCount": 19847, - "loveCount": 2069, - "totalInteractionCount": 101386, - "wowCount": 7700, - "sadCount": 19013, - "angryCount": 19064, - "thankfulCount": 0, - "postCount": 24, - "interactionRate": 0.06403652992957347, - "likeCount": 21241, - "hahaCount": 3811, - "commentCount": 8641, - }, - "breakdown": { - "link": { - "shareCount": 19847, - "loveCount": 2069, - "totalInteractionCount": 101386, - "wowCount": 7700, - "sadCount": 19013, - "angryCount": 19064, - "thankfulCount": 0, - "postCount": 24, - "interactionRate": 0.06403652992957347, - "likeCount": 21241, - "hahaCount": 3811, - "commentCount": 8641, - } - }, - "subscriberData": {"initialCount": 6596234, "finalCount": 6596236}, - }, - { - "account": { - "id": 279876, - "name": "Mad World News", - "handle": "MadWorldNewsCorp", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16649435_1331399193565304_7598140519586777175_n.png?_nc_cat=1&_nc_oc=AQkFf7jm82V9pnSg1x0Pqt0rlA2Yl-XqrdIF4h-iVA0BzRc8fXvud27Fd5_bf3n4adY&_nc_ht=scontent.xx&oh=db1ac67cb2a4dc589f0e879b97477ebd&oe=5E151F16", - "subscriberCount": 2135169, - "url": "https://www.facebook.com/513813158657249", - "platform": "Facebook", - "platformId": "513813158657249", - "verified": False, - }, - "summary": { - "shareCount": 30872, - "loveCount": 4854, - "totalInteractionCount": 98090, - "wowCount": 269, - "sadCount": 847, - "angryCount": 3939, - "thankfulCount": 0, - "postCount": 18, - "interactionRate": 0.2552741350943142, - "likeCount": 47861, - "hahaCount": 494, - "commentCount": 8954, - }, - "breakdown": { - "link": { - "shareCount": 4373, - "loveCount": 331, - "totalInteractionCount": 15848, - "wowCount": 213, - "sadCount": 370, - "angryCount": 3925, - "thankfulCount": 0, - "postCount": 14, - "interactionRate": 0.053031807841211906, - "likeCount": 2660, - "hahaCount": 48, - "commentCount": 3928, - }, - "photo": { - "shareCount": 26499, - "loveCount": 4523, - "totalInteractionCount": 82242, - "wowCount": 56, - "sadCount": 477, - "angryCount": 14, - "thankfulCount": 0, - "postCount": 4, - "interactionRate": 0.9631925523103504, - "likeCount": 45201, - "hahaCount": 446, - "commentCount": 5026, - }, - }, - "subscriberData": {"initialCount": 2133967, "finalCount": 2135169}, - }, - { - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - "summary": { - "loveCount": 7418, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 92879, - "wowCount": 4940, - "thankfulCount": 0, - "interactionRate": 0.00991103629816666, - "likeCount": 31672, - "hahaCount": 5093, - "commentCount": 13711, - "shareCount": 14739, - "sadCount": 9408, - "angryCount": 5898, - "totalVideoTimeMS": 446345, - 
"postCount": 66, - }, - "breakdown": { - "native_video": { - "loveCount": 3, - "totalInteractionCount": 2737, - "wowCount": 1029, - "thankfulCount": 0, - "interactionRate": 0.019279677575040614, - "likeCount": 314, - "hahaCount": 14, - "commentCount": 234, - "shareCount": 871, - "sadCount": 270, - "angryCount": 2, - "totalVideoTimeMS": 36094, - "postCount": 1, - }, - "owned_video": { - "loveCount": 3449, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 22749, - "wowCount": 1965, - "thankfulCount": 0, - "interactionRate": 0.026704149684683584, - "likeCount": 9557, - "hahaCount": 227, - "commentCount": 3519, - "shareCount": 3383, - "sadCount": 555, - "angryCount": 94, - "totalVideoTimeMS": 446345, - "postCount": 6, - }, - "crosspost": { - "loveCount": 3446, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 20012, - "wowCount": 936, - "thankfulCount": 0, - "interactionRate": 0.028190452924849306, - "likeCount": 9243, - "hahaCount": 213, - "commentCount": 3285, - "shareCount": 2512, - "sadCount": 285, - "angryCount": 92, - "totalVideoTimeMS": 410251, - "postCount": 5, - }, - "link": { - "shareCount": 11189, - "loveCount": 3968, - "totalInteractionCount": 68995, - "wowCount": 2699, - "sadCount": 8643, - "angryCount": 5801, - "thankfulCount": 0, - "postCount": 59, - "interactionRate": 0.008234542595989214, - "likeCount": 21775, - "hahaCount": 4861, - "commentCount": 10059, - }, - "photo": { - "shareCount": 167, - "loveCount": 1, - "totalInteractionCount": 1135, - "wowCount": 276, - "sadCount": 210, - "angryCount": 3, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.007995043495678152, - "likeCount": 340, - "hahaCount": 5, - "commentCount": 133, - }, - }, - "subscriberData": {"initialCount": 14195962, "finalCount": 14196629}, - }, - { - "account": { - "id": 48728, - "name": "Faves USA", - "handle": "thefavesusa", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A", - "subscriberCount": 6323373, - "url": "https://www.facebook.com/532854420074062", - "platform": "Facebook", - "platformId": "532854420074062", - "verified": True, - }, - "summary": { - "loveCount": 4461, - "totalInteractionCount": 86313, - "wowCount": 1431, - "thankfulCount": 0, - "interactionRate": 0.04135428564425114, - "likeCount": 28142, - "hahaCount": 8631, - "commentCount": 13803, - "shareCount": 25692, - "sadCount": 1700, - "angryCount": 2453, - "totalVideoTimeMS": 298802, - "postCount": 33, - }, - "breakdown": { - "link": { - "shareCount": 10859, - "loveCount": 2743, - "totalInteractionCount": 37660, - "wowCount": 1198, - "sadCount": 1579, - "angryCount": 2400, - "thankfulCount": 0, - "postCount": 23, - "interactionRate": 0.02588794095588494, - "likeCount": 11476, - "hahaCount": 2813, - "commentCount": 4592, - }, - "photo": { - "shareCount": 865, - "loveCount": 257, - "totalInteractionCount": 12125, - "wowCount": 33, - "sadCount": 64, - "angryCount": 48, - "thankfulCount": 0, - "postCount": 3, - "interactionRate": 0.06390541808352537, - "likeCount": 3149, - "hahaCount": 104, - "commentCount": 7605, - }, - "share": { - "loveCount": 1461, - "totalInteractionCount": 36528, - "wowCount": 200, - "thankfulCount": 0, - "interactionRate": 0.08251880018803152, - "likeCount": 13517, - "hahaCount": 5714, - "commentCount": 1606, - "shareCount": 13968, - "sadCount": 57, - "angryCount": 
5, - "totalVideoTimeMS": 298802, - "postCount": 7, - }, - }, - "subscriberData": {"initialCount": 6323442, "finalCount": 6323373}, - }, - { - "account": { - "id": 16403, - "name": "BBC News", - "handle": "bbcnews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/67191311_10156857876272217_4342089529688064000_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQk5kAdrSFMzze_w-lzADmQENwckqsjInhGPXnxTYNgxJpQ7siiGF44i0wivzxfUmPw&_nc_ht=scontent.xx&oh=5b9721d79e733db34cd496e566100993&oe=5DF5BFA1", - "subscriberCount": 49397882, - "url": "https://www.facebook.com/228735667216", - "platform": "Facebook", - "platformId": "228735667216", - "verified": True, - }, - "summary": { - "loveCount": 4060, - "threePlusMinuteVideoCount": 4, - "totalInteractionCount": 85997, - "wowCount": 2481, - "thankfulCount": 0, - "interactionRate": 0.007569588922429944, - "likeCount": 43029, - "hahaCount": 6026, - "commentCount": 11562, - "shareCount": 12934, - "sadCount": 4689, - "angryCount": 1216, - "totalVideoTimeMS": 1371143, - "postCount": 23, - }, - "breakdown": { - "native_video": { - "loveCount": 971, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 12803, - "wowCount": 74, - "thankfulCount": 0, - "interactionRate": 0.00863852258144118, - "likeCount": 5000, - "hahaCount": 887, - "commentCount": 1711, - "shareCount": 2232, - "sadCount": 1888, - "angryCount": 40, - "totalVideoTimeMS": 608830, - "postCount": 3, - }, - "owned_video": { - "loveCount": 2288, - "threePlusMinuteVideoCount": 4, - "totalInteractionCount": 36783, - "wowCount": 926, - "thankfulCount": 0, - "interactionRate": 0.01063669970538832, - "likeCount": 17727, - "hahaCount": 1521, - "commentCount": 3437, - "shareCount": 7420, - "sadCount": 3299, - "angryCount": 165, - "totalVideoTimeMS": 1371143, - "postCount": 7, - }, - "crosspost": { - "loveCount": 1317, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 23980, - "wowCount": 852, - "thankfulCount": 0, - "interactionRate": 0.012136850920023406, - "likeCount": 12727, - "hahaCount": 634, - "commentCount": 1726, - "shareCount": 5188, - "sadCount": 1411, - "angryCount": 125, - "totalVideoTimeMS": 762313, - "postCount": 4, - }, - "link": { - "shareCount": 5514, - "loveCount": 1772, - "totalInteractionCount": 49214, - "wowCount": 1555, - "sadCount": 1390, - "angryCount": 1051, - "thankfulCount": 0, - "postCount": 16, - "interactionRate": 0.006225323866400663, - "likeCount": 25302, - "hahaCount": 4505, - "commentCount": 8125, - }, - }, - "subscriberData": {"initialCount": 49392159, "finalCount": 49397882}, - }, - { - "account": { - "id": 13489, - "name": "The Daily Caller", - "handle": "DailyCaller", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471", - "subscriberCount": 5408115, - "url": "https://www.facebook.com/182919686769", - "platform": "Facebook", - "platformId": "182919686769", - "verified": True, - }, - "summary": { - "shareCount": 10978, - "loveCount": 1547, - "totalInteractionCount": 83887, - "wowCount": 1617, - "sadCount": 1729, - "angryCount": 8650, - "thankfulCount": 0, - "postCount": 52, - "interactionRate": 0.02982468613123435, - "likeCount": 21611, - "hahaCount": 17071, - "commentCount": 20684, - }, - "breakdown": { - "link": { - "shareCount": 10839, - "loveCount": 1540, - "totalInteractionCount": 80480, - "wowCount": 1600, - "sadCount": 1711, - 
"angryCount": 8392, - "thankfulCount": 0, - "postCount": 51, - "interactionRate": 0.02917752927159814, - "likeCount": 21302, - "hahaCount": 15684, - "commentCount": 19412, - }, - "photo": { - "shareCount": 139, - "loveCount": 7, - "totalInteractionCount": 3407, - "wowCount": 17, - "sadCount": 18, - "angryCount": 258, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.06299609773658738, - "likeCount": 309, - "hahaCount": 1387, - "commentCount": 1272, - }, - }, - "subscriberData": {"initialCount": 5408428, "finalCount": 5408115}, - }, - { - "account": { - "id": 8324, - "name": "MSNBC", - "handle": "msnbc", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2", - "subscriberCount": 2290512, - "url": "https://www.facebook.com/273864989376427", - "platform": "Facebook", - "platformId": "273864989376427", - "verified": True, - }, - "summary": { - "loveCount": 1671, - "threePlusMinuteVideoCount": 4, - "totalInteractionCount": 81269, - "wowCount": 2954, - "thankfulCount": 0, - "interactionRate": 0.0437899097220585, - "likeCount": 17184, - "hahaCount": 3886, - "commentCount": 12944, - "shareCount": 17257, - "sadCount": 4576, - "angryCount": 20797, - "totalVideoTimeMS": 1542583, - "postCount": 81, - }, - "breakdown": { - "native_video": { - "loveCount": 2, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 893, - "wowCount": 31, - "thankfulCount": 0, - "interactionRate": 0.019471884083786733, - "likeCount": 120, - "hahaCount": 2, - "commentCount": 71, - "shareCount": 147, - "sadCount": 518, - "angryCount": 2, - "totalVideoTimeMS": 1000636, - "postCount": 2, - }, - "owned_video": { - "loveCount": 5, - "threePlusMinuteVideoCount": 4, - "totalInteractionCount": 2327, - "wowCount": 58, - "thankfulCount": 0, - "interactionRate": 0.020301403809329214, - "likeCount": 470, - "hahaCount": 281, - "commentCount": 415, - "shareCount": 432, - "sadCount": 544, - "angryCount": 122, - "totalVideoTimeMS": 1542583, - "postCount": 5, - }, - "crosspost": { - "loveCount": 3, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 1434, - "wowCount": 27, - "thankfulCount": 0, - "interactionRate": 0.020868969937331967, - "likeCount": 350, - "hahaCount": 279, - "commentCount": 344, - "shareCount": 285, - "sadCount": 26, - "angryCount": 120, - "totalVideoTimeMS": 541947, - "postCount": 3, - }, - "link": { - "shareCount": 16809, - "loveCount": 1609, - "totalInteractionCount": 78288, - "wowCount": 2887, - "sadCount": 4020, - "angryCount": 20635, - "thankfulCount": 0, - "postCount": 74, - "interactionRate": 0.04614749209991609, - "likeCount": 16280, - "hahaCount": 3593, - "commentCount": 12455, - }, - "photo": { - "shareCount": 16, - "loveCount": 57, - "totalInteractionCount": 654, - "wowCount": 9, - "sadCount": 12, - "angryCount": 40, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.014276471065915385, - "likeCount": 434, - "hahaCount": 12, - "commentCount": 74, - }, - }, - "subscriberData": {"initialCount": 2290452, "finalCount": 2290512}, - }, - { - "account": { - "id": 311636, - "name": "NowThis Politics", - "handle": "NowThisPolitics", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28276603_1939096412788506_2850422809072819205_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlBSULvu9xr5smvB3kmRub5MfL3SpyPxNX94GEyc5skmb19swOR40nthDv1Kip3kcw&_nc_ht=scontent.xx&oh=b734d3faa39291c805198e3ad7de3450&oe=5DFF0890", - "subscriberCount": 6074746, - "url": "https://www.facebook.com/908009612563863", - "platform": "Facebook", - "platformId": "908009612563863", - "verified": True, - }, - "summary": { - "loveCount": 1871, - "threePlusMinuteVideoCount": 13, - "totalInteractionCount": 78197, - "wowCount": 2485, - "thankfulCount": 0, - "interactionRate": 0.06435188115661188, - "likeCount": 12525, - "hahaCount": 4937, - "commentCount": 12668, - "shareCount": 25742, - "sadCount": 1903, - "angryCount": 16066, - "totalVideoTimeMS": 5019375, - "postCount": 20, - }, - "breakdown": { - "native_video": { - "loveCount": 70, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 36820, - "wowCount": 1089, - "thankfulCount": 0, - "interactionRate": 0.30307447738378734, - "likeCount": 2722, - "hahaCount": 195, - "commentCount": 4278, - "shareCount": 16490, - "sadCount": 1151, - "angryCount": 10825, - "totalVideoTimeMS": 1091047, - "postCount": 2, - }, - "owned_video": { - "loveCount": 1792, - "threePlusMinuteVideoCount": 10, - "totalInteractionCount": 73717, - "wowCount": 2373, - "thankfulCount": 0, - "interactionRate": 0.07584270055986465, - "likeCount": 11977, - "hahaCount": 3646, - "commentCount": 11480, - "shareCount": 24889, - "sadCount": 1686, - "angryCount": 15874, - "totalVideoTimeMS": 4317388, - "postCount": 16, - }, - "crosspost": { - "loveCount": 1722, - "threePlusMinuteVideoCount": 9, - "totalInteractionCount": 36897, - "wowCount": 1284, - "thankfulCount": 0, - "interactionRate": 0.04337866637187831, - "likeCount": 9255, - "hahaCount": 3451, - "commentCount": 7202, - "shareCount": 8399, - "sadCount": 535, - "angryCount": 5049, - "totalVideoTimeMS": 3226341, - "postCount": 14, - }, - "share": { - "loveCount": 79, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 4480, - "wowCount": 112, - "thankfulCount": 0, - "interactionRate": 0.018437991019546, - "likeCount": 548, - "hahaCount": 1291, - "commentCount": 1188, - "shareCount": 853, - "sadCount": 217, - "angryCount": 192, - "totalVideoTimeMS": 701987, - "postCount": 4, - }, - }, - "subscriberData": {"initialCount": 6074083, "finalCount": 6074746}, - }, - { - "account": { - "id": 13889, - "name": "NBC News", - "handle": "NBCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653", - "subscriberCount": 9970540, - "url": "https://www.facebook.com/155869377766434", - "platform": "Facebook", - "platformId": "155869377766434", - "verified": True, - }, - "summary": { - "loveCount": 1957, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 77341, - "wowCount": 4953, - "thankfulCount": 0, - "interactionRate": 0.006679650864879389, - "likeCount": 13740, - "hahaCount": 4266, - "commentCount": 10747, - "shareCount": 14838, - "sadCount": 16923, - "angryCount": 9917, - "totalVideoTimeMS": 908004, - "postCount": 116, - }, - "breakdown": { - "native_video": { - "loveCount": 1, - "totalInteractionCount": 306, - "wowCount": 61, - "thankfulCount": 0, - "interactionRate": 0.0015345143878776975, - "likeCount": 99, - "hahaCount": 5, - "commentCount": 39, - "shareCount": 69, - 
"sadCount": 31, - "angryCount": 1, - "totalVideoTimeMS": 23829, - "postCount": 2, - }, - "owned_video": { - "loveCount": 6, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 700, - "wowCount": 86, - "thankfulCount": 0, - "interactionRate": 0.0014041308124371087, - "likeCount": 234, - "hahaCount": 9, - "commentCount": 80, - "shareCount": 169, - "sadCount": 80, - "angryCount": 36, - "totalVideoTimeMS": 372083, - "postCount": 5, - }, - "crosspost": { - "loveCount": 5, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 394, - "wowCount": 25, - "thankfulCount": 0, - "interactionRate": 0.0013138652602090089, - "likeCount": 135, - "hahaCount": 4, - "commentCount": 41, - "shareCount": 100, - "sadCount": 49, - "angryCount": 35, - "totalVideoTimeMS": 348254, - "postCount": 3, - }, - "link": { - "shareCount": 14613, - "loveCount": 1935, - "totalInteractionCount": 75918, - "wowCount": 4848, - "sadCount": 16618, - "angryCount": 9873, - "thankfulCount": 0, - "postCount": 107, - "interactionRate": 0.007110919614413644, - "likeCount": 13294, - "hahaCount": 4240, - "commentCount": 10497, - }, - "share": { - "loveCount": 16, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 723, - "wowCount": 19, - "thankfulCount": 0, - "interactionRate": 0.001805311044561997, - "likeCount": 212, - "hahaCount": 17, - "commentCount": 170, - "shareCount": 56, - "sadCount": 225, - "angryCount": 8, - "totalVideoTimeMS": 535921, - "postCount": 4, - }, - }, - "subscriberData": {"initialCount": 9970622, "finalCount": 9970540}, - }, - { - "account": { - "id": 15633, - "name": "The Hill", - "handle": "TheHill", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21463076_10155260793709087_1084657546973301538_n.png?_nc_cat=1&_nc_oc=AQnUzEj2cdNQuf4Zy5vyhBDVoFTJY2zVa3PSqT1fGgs9diAmCxedDkDI8vcWKhFV8jI&_nc_ht=scontent.xx&oh=7cdf47b74db6d8449753f4e74d7874fc&oe=5E028CD1", - "subscriberCount": 1380989, - "url": "https://www.facebook.com/7533944086", - "platform": "Facebook", - "platformId": "7533944086", - "verified": True, - }, - "summary": { - "shareCount": 10494, - "loveCount": 4188, - "totalInteractionCount": 73354, - "wowCount": 1779, - "sadCount": 1862, - "angryCount": 7496, - "thankfulCount": 0, - "postCount": 53, - "interactionRate": 0.10021734274150189, - "likeCount": 30160, - "hahaCount": 6470, - "commentCount": 10905, - }, - "breakdown": { - "link": { - "shareCount": 10494, - "loveCount": 4188, - "totalInteractionCount": 73354, - "wowCount": 1779, - "sadCount": 1862, - "angryCount": 7496, - "thankfulCount": 0, - "postCount": 53, - "interactionRate": 0.10021734274150189, - "likeCount": 30160, - "hahaCount": 6470, - "commentCount": 10905, - } - }, - "subscriberData": {"initialCount": 1381008, "finalCount": 1380989}, - }, - { - "account": { - "id": 93420, - "name": "The Western Journal", - "handle": "WesternJournal", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49664345_10157205261148984_1256195277434388480_n.png?_nc_cat=1&_nc_oc=AQkUo1GJrlGqxXcfjFxGkOcXookw_tgn8qATXCSI0ICt6sibuBdTtyIuuWj9iPLw5ZM&_nc_ht=scontent.xx&oh=bf010f921f678fbb0032a465900b5f24&oe=5DF8F16D", - "subscriberCount": 5184899, - "url": "https://www.facebook.com/123624513983", - "platform": "Facebook", - "platformId": "123624513983", - "verified": True, - }, - "summary": { - "shareCount": 16119, - "loveCount": 3712, - "totalInteractionCount": 65026, - "wowCount": 858, - "sadCount": 594, - "angryCount": 1324, - "thankfulCount": 0, - "postCount": 36, - "interactionRate": 0.034831203666881, - 
"likeCount": 35643, - "hahaCount": 1656, - "commentCount": 5120, - }, - "breakdown": { - "link": { - "shareCount": 4234, - "loveCount": 1235, - "totalInteractionCount": 24785, - "wowCount": 799, - "sadCount": 581, - "angryCount": 1289, - "thankfulCount": 0, - "postCount": 34, - "interactionRate": 0.014040485199052807, - "likeCount": 11302, - "hahaCount": 1460, - "commentCount": 3885, - }, - "photo": { - "shareCount": 11885, - "loveCount": 2477, - "totalInteractionCount": 40241, - "wowCount": 59, - "sadCount": 13, - "angryCount": 35, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.38804198105074517, - "likeCount": 24341, - "hahaCount": 196, - "commentCount": 1235, - }, - }, - "subscriberData": {"initialCount": 5185113, "finalCount": 5184899}, - }, - { - "account": { - "id": 5860, - "name": "Being Liberal", - "handle": "beingliberal.org", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16865169_10154418564961275_3050958479071030073_n.png?_nc_cat=1&_nc_oc=AQlPF5wIrIXWCeRBPDA5P17NqQMaux6LCm9Ak8V6ktaSHP0ajoY7MreFOF-RleH_5sQ&_nc_ht=scontent.xx&oh=39015e43af0ae9881035d6aa4a9fe5fc&oe=5E0A093D", - "subscriberCount": 1693705, - "url": "https://www.facebook.com/177486166274", - "platform": "Facebook", - "platformId": "177486166274", - "verified": True, - }, - "summary": { - "shareCount": 13979, - "loveCount": 1747, - "totalInteractionCount": 58401, - "wowCount": 1941, - "sadCount": 3755, - "angryCount": 13887, - "thankfulCount": 0, - "postCount": 37, - "interactionRate": 0.09316871951757733, - "likeCount": 12741, - "hahaCount": 4326, - "commentCount": 6025, - }, - "breakdown": { - "link": { - "shareCount": 13383, - "loveCount": 1724, - "totalInteractionCount": 55876, - "wowCount": 1931, - "sadCount": 3745, - "angryCount": 13876, - "thankfulCount": 0, - "postCount": 36, - "interactionRate": 0.09163362020993664, - "likeCount": 12116, - "hahaCount": 3161, - "commentCount": 5940, - }, - "photo": { - "shareCount": 596, - "loveCount": 23, - "totalInteractionCount": 2525, - "wowCount": 10, - "sadCount": 10, - "angryCount": 11, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.14908175968433635, - "likeCount": 625, - "hahaCount": 1165, - "commentCount": 85, - }, - }, - "subscriberData": {"initialCount": 1693698, "finalCount": 1693705}, - }, - { - "account": { - "id": 19065, - "name": "The Independent", - "handle": "TheIndependentOnline", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11051795_10152732082756636_6705742038347351188_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQmApCC_log9_TfPU5-TLVRKHyBo2YH6UPG2d6R-43r5u7HhElr7QPKk9J_AXR9q1Ac&_nc_ht=scontent.xx&oh=47ac79067cb2e33520f6920eb409611d&oe=5E0FED75", - "subscriberCount": 8834731, - "url": "https://www.facebook.com/13312631635", - "platform": "Facebook", - "platformId": "13312631635", - "verified": True, - }, - "summary": { - "loveCount": 3261, - "totalInteractionCount": 57897, - "wowCount": 1631, - "thankfulCount": 0, - "interactionRate": 0.009497613597231903, - "likeCount": 21280, - "hahaCount": 8014, - "commentCount": 9023, - "shareCount": 11766, - "sadCount": 737, - "angryCount": 2185, - "totalVideoTimeMS": 375324, - "postCount": 69, - }, - "breakdown": { - "owned_video": { - "loveCount": 276, - "totalInteractionCount": 6287, - "wowCount": 215, - "thankfulCount": 0, - "interactionRate": 0.010165502992031287, - "likeCount": 1951, - "hahaCount": 675, - "commentCount": 1599, - "shareCount": 1159, - "sadCount": 98, - "angryCount": 314, - "totalVideoTimeMS": 375324, - "postCount": 7, - }, - 
"crosspost": { - "loveCount": 276, - "totalInteractionCount": 6287, - "wowCount": 215, - "thankfulCount": 0, - "interactionRate": 0.010165502992031287, - "likeCount": 1951, - "hahaCount": 675, - "commentCount": 1599, - "shareCount": 1159, - "sadCount": 98, - "angryCount": 314, - "totalVideoTimeMS": 375324, - "postCount": 7, - }, - "link": { - "shareCount": 6888, - "loveCount": 1059, - "totalInteractionCount": 29264, - "wowCount": 843, - "sadCount": 321, - "angryCount": 319, - "thankfulCount": 0, - "postCount": 37, - "interactionRate": 0.008942925794771399, - "likeCount": 10342, - "hahaCount": 4841, - "commentCount": 4651, - }, - "video": { - "shareCount": 3719, - "loveCount": 1926, - "totalInteractionCount": 22346, - "wowCount": 573, - "sadCount": 318, - "angryCount": 1552, - "thankfulCount": 0, - "postCount": 25, - "interactionRate": 0.010108902195861849, - "likeCount": 8987, - "hahaCount": 2498, - "commentCount": 2773, - }, - }, - "subscriberData": {"initialCount": 8832865, "finalCount": 8834731}, - }, - { - "account": { - "id": 48734, - "name": "Young Conservatives", - "handle": "TheYoungCons", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/45427184_10155855954205841_8373169778709233664_n.jpg?_nc_cat=1&_nc_oc=AQmAgxZhqj9CXmiY228VRUATEHxELlR7p8BpNguYxOU_n6uxWw17ssXZSIF2mv2DreA&_nc_ht=scontent.xx&oh=ea10aeb60d4d31efb95e2c0a9f7ee098&oe=5DFE69A3", - "subscriberCount": 1000057, - "url": "https://www.facebook.com/147772245840", - "platform": "Facebook", - "platformId": "147772245840", - "verified": False, - }, - "summary": { - "loveCount": 1973, - "totalInteractionCount": 55399, - "wowCount": 344, - "thankfulCount": 0, - "interactionRate": 0.27690138450692253, - "likeCount": 16735, - "hahaCount": 4505, - "commentCount": 21226, - "shareCount": 6426, - "sadCount": 1039, - "angryCount": 3151, - "totalVideoTimeMS": 28928, - "postCount": 20, - }, - "breakdown": { - "native_video": { - "loveCount": 2, - "totalInteractionCount": 87, - "wowCount": 0, - "thankfulCount": 0, - "interactionRate": 0.0043000215001075, - "likeCount": 63, - "hahaCount": 1, - "commentCount": 14, - "shareCount": 6, - "sadCount": 0, - "angryCount": 1, - "totalVideoTimeMS": 28928, - "postCount": 2, - }, - "owned_video": { - "loveCount": 2, - "totalInteractionCount": 87, - "wowCount": 0, - "thankfulCount": 0, - "interactionRate": 0.0043000215001075, - "likeCount": 63, - "hahaCount": 1, - "commentCount": 14, - "shareCount": 6, - "sadCount": 0, - "angryCount": 1, - "totalVideoTimeMS": 28928, - "postCount": 2, - }, - "link": { - "shareCount": 231, - "loveCount": 70, - "totalInteractionCount": 2764, - "wowCount": 35, - "sadCount": 61, - "angryCount": 282, - "thankfulCount": 0, - "postCount": 9, - "interactionRate": 0.030700153500767505, - "likeCount": 880, - "hahaCount": 465, - "commentCount": 740, - }, - "photo": { - "shareCount": 6189, - "loveCount": 1901, - "totalInteractionCount": 52548, - "wowCount": 309, - "sadCount": 978, - "angryCount": 2868, - "thankfulCount": 0, - "postCount": 9, - "interactionRate": 0.5838029190145951, - "likeCount": 15792, - "hahaCount": 4039, - "commentCount": 20472, - }, - }, - "subscriberData": {"initialCount": 999933, "finalCount": 1000057}, - }, - { - "account": { - "id": 48733, - "name": "Conservative Tribune by WJ", - "handle": "theconservativetribune", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46353000_2202571529821371_2816384259860725760_n.png?_nc_cat=1&_nc_oc=AQmyLmtQSJjNV6pRGGi1jlDx51XV7ULbxHYoibyNBKmronK_dpS07OVljvF5-BdX07s&_nc_ht=scontent.xx&oh=eeade969630115fc0c1ec64d4a462e0f&oe=5DF58CE0", - "subscriberCount": 4272095, - "url": "https://www.facebook.com/519305544814653", - "platform": "Facebook", - "platformId": "519305544814653", - "verified": True, - }, - "summary": { - "shareCount": 13762, - "loveCount": 3067, - "totalInteractionCount": 54701, - "wowCount": 483, - "sadCount": 429, - "angryCount": 2003, - "thankfulCount": 0, - "postCount": 21, - "interactionRate": 0.06095214554361168, - "likeCount": 29598, - "hahaCount": 1289, - "commentCount": 4070, - }, - "breakdown": { - "link": { - "shareCount": 3972, - "loveCount": 668, - "totalInteractionCount": 19515, - "wowCount": 438, - "sadCount": 421, - "angryCount": 1982, - "thankfulCount": 0, - "postCount": 19, - "interactionRate": 0.024039114237054224, - "likeCount": 7802, - "hahaCount": 1166, - "commentCount": 3066, - }, - "photo": { - "shareCount": 9790, - "loveCount": 2399, - "totalInteractionCount": 35186, - "wowCount": 45, - "sadCount": 8, - "angryCount": 21, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.4118014963704917, - "likeCount": 21796, - "hahaCount": 123, - "commentCount": 1004, - }, - }, - "subscriberData": {"initialCount": 4272313, "finalCount": 4272095}, - }, - { - "account": { - "id": 10337, - "name": "Washington Post", - "handle": "washingtonpost", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21430382_10156479428327293_4985425836902947855_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlVAdyvl5eHjwkppWx8pvifrl3XbqjhakYzwfQ1AHjPFaQPjFxNF4BbZq5BQ1nys4Y&_nc_ht=scontent.xx&oh=6cea07f8fc3edae1f7c743fc8997901c&oe=5DC8AB0A", - "subscriberCount": 6289503, - "url": "https://www.facebook.com/6250307292", - "platform": "Facebook", - "platformId": "6250307292", - "verified": True, - }, - "summary": { - "shareCount": 11671, - "loveCount": 536, - "totalInteractionCount": 50242, - "wowCount": 2956, - "sadCount": 2904, - "angryCount": 11827, - "thankfulCount": 0, - "postCount": 27, - "interactionRate": 0.029573864462979164, - "likeCount": 12934, - "hahaCount": 2905, - "commentCount": 4509, - }, - "breakdown": { - "link": { - "shareCount": 11671, - "loveCount": 536, - "totalInteractionCount": 50242, - "wowCount": 2956, - "sadCount": 2904, - "angryCount": 11827, - "thankfulCount": 0, - "postCount": 27, - "interactionRate": 0.029573864462979164, - "likeCount": 12934, - "hahaCount": 2905, - "commentCount": 4509, - } - }, - "subscriberData": {"initialCount": 6289171, "finalCount": 6289503}, - }, - { - "account": { - "id": 5862, - "name": "BuzzFeed", - "handle": "BuzzFeed", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11222622_10153870407270329_4094729505669388790_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlaEUp906VUUmeEPgfBCNmaczf4owSg6ehvwRebY_UVmSGVjDB_IUr4WGPgzRnptXU&_nc_ht=scontent.xx&oh=96b0a01485175975acdaeb06feb9d222&oe=5E06A54B", - "subscriberCount": 11870650, - "url": "https://www.facebook.com/21898300328", - "platform": "Facebook", - "platformId": "21898300328", - "verified": True, - }, - "summary": { - "loveCount": 2960, - "threePlusMinuteVideoCount": 8, - "totalInteractionCount": 48466, - "wowCount": 2861, - "thankfulCount": 0, - "interactionRate": 0.006798235407223357, - "likeCount": 20456, - "hahaCount": 4320, - "commentCount": 7550, - "shareCount": 6343, - "sadCount": 590, - "angryCount": 3386, - "totalVideoTimeMS": 4256816, 
- "postCount": 60, - }, - "breakdown": { - "native_video": { - "loveCount": 405, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 3724, - "wowCount": 389, - "thankfulCount": 0, - "interactionRate": 0.01045428765844385, - "likeCount": 1849, - "hahaCount": 30, - "commentCount": 479, - "shareCount": 572, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 755856, - "postCount": 3, - }, - "owned_video": { - "loveCount": 421, - "threePlusMinuteVideoCount": 6, - "totalInteractionCount": 3879, - "wowCount": 391, - "thankfulCount": 0, - "interactionRate": 0.005441957959189949, - "likeCount": 1948, - "hahaCount": 40, - "commentCount": 487, - "shareCount": 591, - "sadCount": 0, - "angryCount": 1, - "totalVideoTimeMS": 2301642, - "postCount": 6, - }, - "crosspost": { - "loveCount": 16, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 155, - "wowCount": 2, - "thankfulCount": 0, - "interactionRate": 0.00042962825993604853, - "likeCount": 99, - "hahaCount": 10, - "commentCount": 8, - "shareCount": 19, - "sadCount": 0, - "angryCount": 1, - "totalVideoTimeMS": 1545786, - "postCount": 3, - }, - "link": { - "shareCount": 4693, - "loveCount": 2254, - "totalInteractionCount": 38029, - "wowCount": 1877, - "sadCount": 589, - "angryCount": 3383, - "thankfulCount": 0, - "postCount": 52, - "interactionRate": 0.006158005059083363, - "likeCount": 16435, - "hahaCount": 4247, - "commentCount": 4551, - }, - "share": { - "loveCount": 285, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 6558, - "wowCount": 593, - "thankfulCount": 0, - "interactionRate": 0.027622569888829475, - "likeCount": 2073, - "hahaCount": 33, - "commentCount": 2512, - "shareCount": 1059, - "sadCount": 1, - "angryCount": 2, - "totalVideoTimeMS": 1955174, - "postCount": 2, - }, - }, - "subscriberData": {"initialCount": 11870805, "finalCount": 11870650}, - }, - { - "account": { - "id": 14655, - "name": "CBS News", - "handle": "CBSNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB", - "subscriberCount": 5892766, - "url": "https://www.facebook.com/131459315949", - "platform": "Facebook", - "platformId": "131459315949", - "verified": True, - }, - "summary": { - "loveCount": 4010, - "totalInteractionCount": 45029, - "wowCount": 1898, - "thankfulCount": 0, - "interactionRate": 0.009418505700614213, - "likeCount": 15748, - "hahaCount": 3452, - "commentCount": 7560, - "shareCount": 7098, - "sadCount": 2466, - "angryCount": 2797, - "totalVideoTimeMS": 460708, - "postCount": 81, - }, - "breakdown": { - "owned_video": { - "loveCount": 1530, - "totalInteractionCount": 7475, - "wowCount": 146, - "thankfulCount": 0, - "interactionRate": 0.015850242025898513, - "likeCount": 3158, - "hahaCount": 48, - "commentCount": 519, - "shareCount": 794, - "sadCount": 969, - "angryCount": 311, - "totalVideoTimeMS": 460708, - "postCount": 8, - }, - "crosspost": { - "loveCount": 1530, - "totalInteractionCount": 7475, - "wowCount": 146, - "thankfulCount": 0, - "interactionRate": 0.015850242025898513, - "likeCount": 3158, - "hahaCount": 48, - "commentCount": 519, - "shareCount": 794, - "sadCount": 969, - "angryCount": 311, - "totalVideoTimeMS": 460708, - "postCount": 8, - }, - "link": { - "shareCount": 1042, - "loveCount": 239, - "totalInteractionCount": 6094, - "wowCount": 522, - "sadCount": 
395, - "angryCount": 129, - "thankfulCount": 0, - "postCount": 23, - "interactionRate": 0.004480154062994869, - "likeCount": 2279, - "hahaCount": 329, - "commentCount": 1159, - }, - "video": { - "shareCount": 5262, - "loveCount": 2241, - "totalInteractionCount": 31460, - "wowCount": 1230, - "sadCount": 1102, - "angryCount": 2357, - "thankfulCount": 0, - "postCount": 50, - "interactionRate": 0.01067430646069611, - "likeCount": 10311, - "hahaCount": 3075, - "commentCount": 5882, - }, - }, - "subscriberData": {"initialCount": 5892543, "finalCount": 5892766}, - }, - { - "account": { - "id": 3832, - "name": "MoveOn", - "handle": "moveon", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31206661_10155375246245493_295037061581229251_n.png?_nc_cat=1&_nc_oc=AQlSE1FqdCbaeopNV1yaNtJ3CmFLidqKES5CzQDuKERpCBGKUk_e3fO242Wi3KvNKSE&_nc_ht=scontent.xx&oh=ca9e5b7aef01fe823dc1929cfd53827d&oe=5E10EEAD", - "subscriberCount": 1654130, - "url": "https://www.facebook.com/7292655492", - "platform": "Facebook", - "platformId": "7292655492", - "verified": True, - }, - "summary": { - "loveCount": 2867, - "totalInteractionCount": 43301, - "wowCount": 479, - "thankfulCount": 0, - "interactionRate": 0.11897438997664947, - "likeCount": 18596, - "hahaCount": 432, - "commentCount": 5458, - "shareCount": 8787, - "sadCount": 2329, - "angryCount": 4353, - "totalVideoTimeMS": 91134, - "postCount": 22, - }, - "breakdown": { - "owned_video": { - "loveCount": 573, - "totalInteractionCount": 6538, - "wowCount": 23, - "thankfulCount": 0, - "interactionRate": 0.19762565083011538, - "likeCount": 1206, - "hahaCount": 202, - "commentCount": 3324, - "shareCount": 927, - "sadCount": 28, - "angryCount": 255, - "totalVideoTimeMS": 91134, - "postCount": 2, - }, - "crosspost": { - "loveCount": 573, - "totalInteractionCount": 6538, - "wowCount": 23, - "thankfulCount": 0, - "interactionRate": 0.19762565083011538, - "likeCount": 1206, - "hahaCount": 202, - "commentCount": 3324, - "shareCount": 927, - "sadCount": 28, - "angryCount": 255, - "totalVideoTimeMS": 91134, - "postCount": 2, - }, - "link": { - "shareCount": 6704, - "loveCount": 1923, - "totalInteractionCount": 28655, - "wowCount": 341, - "sadCount": 1916, - "angryCount": 3679, - "thankfulCount": 0, - "postCount": 16, - "interactionRate": 0.10821349494827366, - "likeCount": 12332, - "hahaCount": 160, - "commentCount": 1600, - }, - "photo": { - "shareCount": 1156, - "loveCount": 371, - "totalInteractionCount": 8108, - "wowCount": 115, - "sadCount": 385, - "angryCount": 419, - "thankfulCount": 0, - "postCount": 4, - "interactionRate": 0.12254120349729089, - "likeCount": 5058, - "hahaCount": 70, - "commentCount": 534, - }, - }, - "subscriberData": {"initialCount": 1654145, "finalCount": 1654130}, - }, - { - "account": { - "id": 3921, - "name": "The Rachel Maddow Show", - "handle": "therachelmaddowshow", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/560412_10150641324209067_326441500_n.jpg?_nc_cat=1&_nc_oc=AQll8voihNLTZqhNJxHo54RezqGbTpA2ADMeAJ0m1c--3__ynoI3yGrzvpSMzT6QrNI&_nc_ht=scontent.xx&oh=8f8d327dc4a47e1af85f9d3da82d4eb3&oe=5DFB0DA7", - "subscriberCount": 2643600, - "url": "https://www.facebook.com/25987609066", - "platform": "Facebook", - "platformId": "25987609066", - "verified": True, - }, - "summary": { - "shareCount": 10879, - "loveCount": 42, - "totalInteractionCount": 41298, - "wowCount": 3593, - "sadCount": 446, - "angryCount": 15180, - "thankfulCount": 0, - "postCount": 3, - "interactionRate": 0.5207293085186866, - "likeCount": 3884, - 
"hahaCount": 2154, - "commentCount": 5120, - }, - "breakdown": { - "link": { - "shareCount": 10879, - "loveCount": 42, - "totalInteractionCount": 41298, - "wowCount": 3593, - "sadCount": 446, - "angryCount": 15180, - "thankfulCount": 0, - "postCount": 3, - "interactionRate": 0.5207293085186866, - "likeCount": 3884, - "hahaCount": 2154, - "commentCount": 5120, - } - }, - "subscriberData": {"initialCount": 2643600, "finalCount": 2643600}, - }, - { - "account": { - "id": 5740, - "name": "The Guardian", - "handle": "theguardian", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/46160148_10157340584076323_3990431626264838144_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkKD6tb0oraHl_Qq9dA1S51ktyWhE9lPo7udOrFCRkfCctJldfDrwPVn7PcSDSY5Sc&_nc_ht=scontent.xx&oh=8c51a127f7d06b002a6fcba57abe5181&oe=5DFDE22E", - "subscriberCount": 8186263, - "url": "https://www.facebook.com/10513336322", - "platform": "Facebook", - "platformId": "10513336322", - "verified": True, - }, - "summary": { - "shareCount": 5542, - "loveCount": 2243, - "totalInteractionCount": 41152, - "wowCount": 1280, - "sadCount": 2436, - "angryCount": 2683, - "thankfulCount": 0, - "postCount": 62, - "interactionRate": 0.008099022583568658, - "likeCount": 19896, - "hahaCount": 1919, - "commentCount": 5153, - }, - "breakdown": { - "link": { - "shareCount": 5542, - "loveCount": 2243, - "totalInteractionCount": 41152, - "wowCount": 1280, - "sadCount": 2436, - "angryCount": 2683, - "thankfulCount": 0, - "postCount": 62, - "interactionRate": 0.008099022583568658, - "likeCount": 19896, - "hahaCount": 1919, - "commentCount": 5153, - } - }, - "subscriberData": {"initialCount": 8186083, "finalCount": 8186263}, - }, - { - "account": { - "id": 1260835, - "name": "Democratic Coalition Against Trump", - "handle": "TheDemocraticCoalition", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16939431_599665190236860_5252814024528030433_n.png?_nc_cat=1&_nc_oc=AQlrHI03LbP9V0m-Es6d3p44SF5EnNO5lv0A3X_61rp1hvImkRh1UwXCXhOmUPsM4VQ&_nc_ht=scontent.xx&oh=418e568864bbd9412488d5ff27c67545&oe=5DCAD77C", - "subscriberCount": 404090, - "url": "https://www.facebook.com/452797224923658", - "platform": "Facebook", - "platformId": "452797224923658", - "verified": False, - }, - "summary": { - "shareCount": 8954, - "loveCount": 1151, - "totalInteractionCount": 38707, - "wowCount": 539, - "sadCount": 1101, - "angryCount": 6925, - "thankfulCount": 0, - "postCount": 23, - "interactionRate": 0.41631602395920997, - "likeCount": 13869, - "hahaCount": 2374, - "commentCount": 3794, - }, - "breakdown": { - "link": { - "shareCount": 3325, - "loveCount": 287, - "totalInteractionCount": 15744, - "wowCount": 392, - "sadCount": 337, - "angryCount": 3378, - "thankfulCount": 0, - "postCount": 11, - "interactionRate": 0.35419038661452407, - "likeCount": 5126, - "hahaCount": 901, - "commentCount": 1998, - }, - "photo": { - "shareCount": 5629, - "loveCount": 864, - "totalInteractionCount": 22963, - "wowCount": 147, - "sadCount": 764, - "angryCount": 3547, - "thankfulCount": 0, - "postCount": 12, - "interactionRate": 0.4734914113162715, - "likeCount": 8743, - "hahaCount": 1473, - "commentCount": 1796, - }, - }, - "subscriberData": {"initialCount": 403950, "finalCount": 404090}, - }, - { - "account": { - "id": 8806, - "name": "U.S. 
Senator Bernie Sanders", - "handle": "senatorsanders", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c78.0.200.200a/p200x200/400832_10151148541197908_1621512611_n.jpg?_nc_cat=1&_nc_oc=AQnOgGmDEpewZmHOa_WpZV3xQAnvB1i9gDx-nu5MzPilI0ir8LtVvORdwBDCvfzICSE&_nc_ht=scontent.xx&oh=a94d5b4a17b62ba41e432a7968447146&oe=5E07418B", - "subscriberCount": 7504771, - "url": "https://www.facebook.com/9124187907", - "platform": "Facebook", - "platformId": "9124187907", - "verified": True, - }, - "summary": { - "loveCount": 1231, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 36094, - "wowCount": 423, - "thankfulCount": 0, - "interactionRate": 0.09617611341372971, - "likeCount": 20544, - "hahaCount": 139, - "commentCount": 1454, - "shareCount": 6851, - "sadCount": 3025, - "angryCount": 2427, - "totalVideoTimeMS": 304380, - "postCount": 5, - }, - "breakdown": { - "owned_video": { - "loveCount": 466, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 13191, - "wowCount": 302, - "thankfulCount": 0, - "interactionRate": 0.08787496092595559, - "likeCount": 6492, - "hahaCount": 76, - "commentCount": 848, - "shareCount": 2723, - "sadCount": 254, - "angryCount": 2030, - "totalVideoTimeMS": 304380, - "postCount": 2, - }, - "crosspost": { - "loveCount": 466, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 13191, - "wowCount": 302, - "thankfulCount": 0, - "interactionRate": 0.08787496092595559, - "likeCount": 6492, - "hahaCount": 76, - "commentCount": 848, - "shareCount": 2723, - "sadCount": 254, - "angryCount": 2030, - "totalVideoTimeMS": 304380, - "postCount": 2, - }, - "link": { - "shareCount": 200, - "loveCount": 141, - "totalInteractionCount": 2317, - "wowCount": 43, - "sadCount": 156, - "angryCount": 46, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.015429750531047243, - "likeCount": 1501, - "hahaCount": 32, - "commentCount": 198, - }, - "photo": { - "shareCount": 3928, - "loveCount": 624, - "totalInteractionCount": 20586, - "wowCount": 78, - "sadCount": 2615, - "angryCount": 351, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.27429779311929064, - "likeCount": 12551, - "hahaCount": 31, - "commentCount": 408, - }, - }, - "subscriberData": {"initialCount": 7505193, "finalCount": 7504771}, - }, - { - "account": { - "id": 3919, - "name": "Upworthy", - "handle": "Upworthy", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1914363_1176320005742189_4709951186905632219_n.png?_nc_cat=1&_nc_oc=AQlPiX5mYxZC_Xj8_M4a7JZZvCD27izvAXTMtobXrLjwA4S5Pel-CsMh5GMouHt8LNg&_nc_ht=scontent.xx&oh=ba4e0db7c2521356dc17108d8aa4a12a&oe=5E04D944", - "subscriberCount": 11752205, - "url": "https://www.facebook.com/354522044588660", - "platform": "Facebook", - "platformId": "354522044588660", - "verified": True, - }, - "summary": { - "shareCount": 6787, - "loveCount": 4789, - "totalInteractionCount": 33998, - "wowCount": 2581, - "sadCount": 2794, - "angryCount": 601, - "thankfulCount": 0, - "postCount": 10, - "interactionRate": 0.028922232040710656, - "likeCount": 14654, - "hahaCount": 198, - "commentCount": 1594, - }, - "breakdown": { - "link": { - "shareCount": 6450, - "loveCount": 2392, - "totalInteractionCount": 26074, - "wowCount": 2494, - "sadCount": 2791, - "angryCount": 601, - "thankfulCount": 0, - "postCount": 9, - "interactionRate": 0.024650693210337974, - "likeCount": 9648, - "hahaCount": 195, - "commentCount": 1503, - }, - "photo": { - "shareCount": 337, - "loveCount": 2397, - "totalInteractionCount": 7924, - "wowCount": 87, - 
"sadCount": 3, - "angryCount": 0, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.06742564480452817, - "likeCount": 5006, - "hahaCount": 3, - "commentCount": 91, - }, - }, - "subscriberData": {"initialCount": 11752205, "finalCount": 11752205}, - }, - { - "account": { - "id": 16337, - "name": "Yahoo News", - "handle": "yahoonews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1234558_10151822723996037_1232781499_n.jpg?_nc_cat=1&_nc_oc=AQkPmfbCJFc9Ll_w6v-FBqGGulHvLsK6m9J20HAPS45YGyFGlkUw6ZZKS6yuaKxI_V0&_nc_ht=scontent.xx&oh=e2ffaa2bbb04dd746da7d26542134656&oe=5DFF9BED", - "subscriberCount": 7865795, - "url": "https://www.facebook.com/338028696036", - "platform": "Facebook", - "platformId": "338028696036", - "verified": True, - }, - "summary": { - "shareCount": 8090, - "loveCount": 1049, - "totalInteractionCount": 30391, - "wowCount": 1730, - "sadCount": 4389, - "angryCount": 2623, - "thankfulCount": 0, - "postCount": 42, - "interactionRate": 0.009191497800969112, - "likeCount": 6682, - "hahaCount": 1594, - "commentCount": 4234, - }, - "breakdown": { - "link": { - "shareCount": 8069, - "loveCount": 1042, - "totalInteractionCount": 29826, - "wowCount": 1722, - "sadCount": 4382, - "angryCount": 2554, - "thankfulCount": 0, - "postCount": 41, - "interactionRate": 0.00924234979433547, - "likeCount": 6615, - "hahaCount": 1467, - "commentCount": 3975, - }, - "photo": { - "shareCount": 21, - "loveCount": 7, - "totalInteractionCount": 565, - "wowCount": 8, - "sadCount": 7, - "angryCount": 69, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.007182844062997992, - "likeCount": 67, - "hahaCount": 127, - "commentCount": 259, - }, - }, - "subscriberData": {"initialCount": 7866135, "finalCount": 7865795}, - }, - { - "account": { - "id": 51124, - "name": "Robert Reich", - "handle": "RBReich", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/14925611_1361055967240329_5201622253030095993_n.png?_nc_cat=1&_nc_oc=AQnnHM7tmaHqxLEWywroKLLlnR80odzpdoGwJG-9nfBPEgYbyMODen29YtOfkwesH5s&_nc_ht=scontent.xx&oh=ef9b38d97600f9e166d77c3ac2b47a40&oe=5DF447FA", - "subscriberCount": 2450538, - "url": "https://www.facebook.com/142474049098533", - "platform": "Facebook", - "platformId": "142474049098533", - "verified": True, - }, - "summary": { - "loveCount": 116, - "totalInteractionCount": 28106, - "wowCount": 598, - "thankfulCount": 0, - "interactionRate": 0.3822872226576185, - "likeCount": 9539, - "hahaCount": 2977, - "commentCount": 1407, - "shareCount": 7700, - "sadCount": 1362, - "angryCount": 4407, - "totalVideoTimeMS": 141182, - "postCount": 3, - }, - "breakdown": { - "owned_video": { - "loveCount": 50, - "totalInteractionCount": 4781, - "wowCount": 48, - "thankfulCount": 0, - "interactionRate": 0.19510196536358604, - "likeCount": 2047, - "hahaCount": 26, - "commentCount": 258, - "shareCount": 1675, - "sadCount": 242, - "angryCount": 435, - "totalVideoTimeMS": 141182, - "postCount": 1, - }, - "crosspost": { - "loveCount": 50, - "totalInteractionCount": 4781, - "wowCount": 48, - "thankfulCount": 0, - "interactionRate": 0.19510196536358604, - "likeCount": 2047, - "hahaCount": 26, - "commentCount": 258, - "shareCount": 1675, - "sadCount": 242, - "angryCount": 435, - "totalVideoTimeMS": 141182, - "postCount": 1, - }, - "link": { - "shareCount": 3759, - "loveCount": 10, - "totalInteractionCount": 10777, - "wowCount": 476, - "sadCount": 167, - "angryCount": 3616, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.439785375595768, - "likeCount": 
2227, - "hahaCount": 17, - "commentCount": 505, - }, - "photo": { - "shareCount": 2266, - "loveCount": 56, - "totalInteractionCount": 12548, - "wowCount": 74, - "sadCount": 953, - "angryCount": 356, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.5120559425606103, - "likeCount": 5265, - "hahaCount": 2934, - "commentCount": 644, - }, - }, - "subscriberData": {"initialCount": 2450489, "finalCount": 2450538}, - }, - { - "account": { - "id": 4010, - "name": "CREDO Mobile", - "handle": "CREDO", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/999271_10151925847135968_1940376022_n.png?_nc_cat=1&_nc_oc=AQmkEi4kvJX6wR9R0wgtJW7cJsK92vaVeoDNi3KsZ73HZ11pK-zDJUhBvAU8294ZxOM&_nc_ht=scontent.xx&oh=c3f09575f02dbb46fc74b68e6b9ac627&oe=5DFCD329", - "subscriberCount": 686618, - "url": "https://www.facebook.com/6851405967", - "platform": "Facebook", - "platformId": "6851405967", - "verified": True, - }, - "summary": { - "loveCount": 2464, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 25985, - "wowCount": 838, - "thankfulCount": 0, - "interactionRate": 0.172005112090969, - "likeCount": 10372, - "hahaCount": 414, - "commentCount": 1631, - "shareCount": 6526, - "sadCount": 530, - "angryCount": 3210, - "totalVideoTimeMS": 184617, - "postCount": 22, - }, - "breakdown": { - "link": { - "shareCount": 3818, - "loveCount": 1618, - "totalInteractionCount": 14259, - "wowCount": 714, - "sadCount": 450, - "angryCount": 2101, - "thankfulCount": 0, - "postCount": 14, - "interactionRate": 0.14826520246283356, - "likeCount": 4354, - "hahaCount": 136, - "commentCount": 1068, - }, - "photo": { - "shareCount": 2434, - "loveCount": 440, - "totalInteractionCount": 10211, - "wowCount": 114, - "sadCount": 80, - "angryCount": 1109, - "thankfulCount": 0, - "postCount": 6, - "interactionRate": 0.24773979311324157, - "likeCount": 5257, - "hahaCount": 278, - "commentCount": 499, - }, - "share": { - "loveCount": 149, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 576, - "wowCount": 6, - "thankfulCount": 0, - "interactionRate": 0.0838907235939019, - "likeCount": 304, - "hahaCount": 0, - "commentCount": 29, - "shareCount": 88, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 184617, - "postCount": 1, - }, - "video": { - "shareCount": 186, - "loveCount": 257, - "totalInteractionCount": 939, - "wowCount": 4, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.13675935669214215, - "likeCount": 457, - "hahaCount": 0, - "commentCount": 35, - }, - }, - "subscriberData": {"initialCount": 686597, "finalCount": 686618}, - }, - { - "account": { - "id": 10247, - "name": "NowThis", - "handle": "NowThisNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28379313_1840609126029203_6405012222846484702_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkFdmIYy2uPLXX0xb7b7uQjQ-yiayvSBaPWqSlby_pCoW_1_Iybmu7xSmUb-UMr1gc&_nc_ht=scontent.xx&oh=add01854d7218f79e9aad6351846e535&oe=5E0CA890", - "subscriberCount": 14558656, - "url": "https://www.facebook.com/341163402640457", - "platform": "Facebook", - "platformId": "341163402640457", - "verified": True, - }, - "summary": { - "loveCount": 1378, - "threePlusMinuteVideoCount": 11, - "totalInteractionCount": 25669, - "wowCount": 678, - "thankfulCount": 0, - "interactionRate": 0.009795233258952069, - "likeCount": 7300, - "hahaCount": 1634, - "commentCount": 3654, - "shareCount": 5764, - "sadCount": 1389, - "angryCount": 3872, - "totalVideoTimeMS": 3773150, - "postCount": 18, - }, - "breakdown": { 
- "native_video": { - "loveCount": 498, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 4671, - "wowCount": 214, - "thankfulCount": 0, - "interactionRate": 0.008016155128469189, - "likeCount": 2036, - "hahaCount": 63, - "commentCount": 198, - "shareCount": 1121, - "sadCount": 495, - "angryCount": 46, - "totalVideoTimeMS": 760141, - "postCount": 4, - }, - "owned_video": { - "loveCount": 1209, - "threePlusMinuteVideoCount": 7, - "totalInteractionCount": 12700, - "wowCount": 353, - "thankfulCount": 0, - "interactionRate": 0.007926857770568506, - "likeCount": 5548, - "hahaCount": 405, - "commentCount": 1742, - "shareCount": 2653, - "sadCount": 542, - "angryCount": 248, - "totalVideoTimeMS": 1839681, - "postCount": 11, - }, - "crosspost": { - "loveCount": 711, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 8029, - "wowCount": 139, - "thankfulCount": 0, - "interactionRate": 0.00787877457785275, - "likeCount": 3512, - "hahaCount": 342, - "commentCount": 1544, - "shareCount": 1532, - "sadCount": 47, - "angryCount": 202, - "totalVideoTimeMS": 1079540, - "postCount": 7, - }, - "share": { - "loveCount": 169, - "threePlusMinuteVideoCount": 4, - "totalInteractionCount": 12969, - "wowCount": 325, - "thankfulCount": 0, - "interactionRate": 0.01272143898708221, - "likeCount": 1752, - "hahaCount": 1229, - "commentCount": 1912, - "shareCount": 3111, - "sadCount": 847, - "angryCount": 3624, - "totalVideoTimeMS": 1933469, - "postCount": 7, - }, - }, - "subscriberData": {"initialCount": 14557547, "finalCount": 14558656}, - }, - { - "account": { - "id": 17943, - "name": "Bill Maher", - "handle": "Maher", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/48420187_10156168579487297_4906298196289912832_n.jpg?_nc_cat=1&_nc_oc=AQlC_apxui0Ni6EzTgjAOj02RffSNKA6MYVKQtmsFrJCXWFg9cA4llD0r9MEzEWZUvY&_nc_ht=scontent.xx&oh=f92277539c631c4b38ee16136d790a4f&oe=5DF40276", - "subscriberCount": 3966346, - "url": "https://www.facebook.com/62507427296", - "platform": "Facebook", - "platformId": "62507427296", - "verified": True, - }, - "summary": { - "loveCount": 1611, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 25278, - "wowCount": 85, - "thankfulCount": 0, - "interactionRate": 0.3186502279890824, - "likeCount": 12339, - "hahaCount": 3218, - "commentCount": 1499, - "shareCount": 6434, - "sadCount": 43, - "angryCount": 49, - "totalVideoTimeMS": 827303, - "postCount": 2, - }, - "breakdown": { - "native_video": { - "loveCount": 1611, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 25278, - "wowCount": 85, - "thankfulCount": 0, - "interactionRate": 0.3186502279890824, - "likeCount": 12339, - "hahaCount": 3218, - "commentCount": 1499, - "shareCount": 6434, - "sadCount": 43, - "angryCount": 49, - "totalVideoTimeMS": 827303, - "postCount": 2, - }, - "owned_video": { - "loveCount": 1611, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 25278, - "wowCount": 85, - "thankfulCount": 0, - "interactionRate": 0.3186502279890824, - "likeCount": 12339, - "hahaCount": 3218, - "commentCount": 1499, - "shareCount": 6434, - "sadCount": 43, - "angryCount": 49, - "totalVideoTimeMS": 827303, - "postCount": 2, - }, - }, - "subscriberData": {"initialCount": 3966490, "finalCount": 3966346}, - }, - { - "account": { - "id": 30245, - "name": "IJR Red", - "handle": "IJRRed", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/23376285_10156265164197971_2450414612163288246_n.jpg?_nc_cat=1&_nc_oc=AQm4KDy-Qmj38dJbaAQ0KXPVdY94zu7JBQAIUkAO2_W0uRWIl-5aI18nffFvxZoVICg&_nc_ht=scontent.xx&oh=ab7b4676afa9874079a36c20150411f5&oe=5E0C3B40", - "subscriberCount": 8531658, - "url": "https://www.facebook.com/189885532970", - "platform": "Facebook", - "platformId": "189885532970", - "verified": True, - }, - "summary": { - "loveCount": 407, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 23146, - "wowCount": 560, - "thankfulCount": 0, - "interactionRate": 0.03390793789748867, - "likeCount": 6042, - "hahaCount": 3595, - "commentCount": 4576, - "shareCount": 4828, - "sadCount": 147, - "angryCount": 2991, - "totalVideoTimeMS": 370215, - "postCount": 8, - }, - "breakdown": { - "owned_video": { - "loveCount": 286, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 5441, - "wowCount": 17, - "thankfulCount": 0, - "interactionRate": 0.03188025962017601, - "likeCount": 2375, - "hahaCount": 317, - "commentCount": 1095, - "shareCount": 673, - "sadCount": 19, - "angryCount": 659, - "totalVideoTimeMS": 370215, - "postCount": 2, - }, - "crosspost": { - "loveCount": 286, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 5441, - "wowCount": 17, - "thankfulCount": 0, - "interactionRate": 0.03188025962017601, - "likeCount": 2375, - "hahaCount": 317, - "commentCount": 1095, - "shareCount": 673, - "sadCount": 19, - "angryCount": 659, - "totalVideoTimeMS": 370215, - "postCount": 2, - }, - "link": { - "shareCount": 4155, - "loveCount": 121, - "totalInteractionCount": 17705, - "wowCount": 543, - "sadCount": 128, - "angryCount": 2332, - "thankfulCount": 0, - "postCount": 6, - "interactionRate": 0.034576016867470305, - "likeCount": 3667, - "hahaCount": 3278, - "commentCount": 3481, - }, - }, - "subscriberData": {"initialCount": 8532193, "finalCount": 8531658}, - }, - { - "account": { - "id": 19471, - "name": "CNN Politics", - "handle": "cnnpolitics", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22450067_1835100979865060_6024097554775073207_n.png?_nc_cat=1&_nc_oc=AQmpWGKTrzg30Lmmy5ncZ5txlFyDirtObkp2leejFgez6t02RAflIlctecGiymX0NU8&_nc_ht=scontent.xx&oh=bbc41bdb10ef689246595025fc23b309&oe=5E070315", - "subscriberCount": 2855693, - "url": "https://www.facebook.com/219367258105115", - "platform": "Facebook", - "platformId": "219367258105115", - "verified": True, - }, - "summary": { - "shareCount": 3683, - "loveCount": 1448, - "totalInteractionCount": 21344, - "wowCount": 476, - "sadCount": 125, - "angryCount": 1501, - "thankfulCount": 0, - "postCount": 14, - "interactionRate": 0.05336895933155728, - "likeCount": 7316, - "hahaCount": 2685, - "commentCount": 4110, - }, - "breakdown": { - "link": { - "shareCount": 3683, - "loveCount": 1448, - "totalInteractionCount": 21344, - "wowCount": 476, - "sadCount": 125, - "angryCount": 1501, - "thankfulCount": 0, - "postCount": 14, - "interactionRate": 0.05336895933155728, - "likeCount": 7316, - "hahaCount": 2685, - "commentCount": 4110, - } - }, - "subscriberData": {"initialCount": 2855492, "finalCount": 2855693}, - }, - { - "account": { - "id": 6786, - "name": "The Young Turks", - "handle": "TheYoungTurks", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5", - "subscriberCount": 2100186, - "url": 
"https://www.facebook.com/210277954204", - "platform": "Facebook", - "platformId": "210277954204", - "verified": True, - }, - "summary": { - "loveCount": 751, - "threePlusMinuteVideoCount": 22, - "totalInteractionCount": 20554, - "wowCount": 589, - "thankfulCount": 0, - "interactionRate": 0.02571346533229654, - "likeCount": 6504, - "hahaCount": 1670, - "commentCount": 2256, - "shareCount": 5521, - "sadCount": 420, - "angryCount": 2843, - "totalVideoTimeMS": 16356669, - "postCount": 38, - }, - "breakdown": { - "native_video": { - "loveCount": 77, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 2183, - "wowCount": 48, - "thankfulCount": 0, - "interactionRate": 0.020761242379409798, - "likeCount": 890, - "hahaCount": 359, - "commentCount": 306, - "shareCount": 439, - "sadCount": 24, - "angryCount": 40, - "totalVideoTimeMS": 2126332, - "postCount": 5, - }, - "owned_video": { - "loveCount": 459, - "threePlusMinuteVideoCount": 21, - "totalInteractionCount": 16329, - "wowCount": 488, - "thankfulCount": 0, - "interactionRate": 0.03533220606771117, - "likeCount": 4873, - "hahaCount": 1162, - "commentCount": 1770, - "shareCount": 4805, - "sadCount": 388, - "angryCount": 2384, - "totalVideoTimeMS": 9264722, - "postCount": 22, - }, - "crosspost": { - "loveCount": 382, - "threePlusMinuteVideoCount": 16, - "totalInteractionCount": 14146, - "wowCount": 440, - "thankfulCount": 0, - "interactionRate": 0.039617783623093934, - "likeCount": 3983, - "hahaCount": 803, - "commentCount": 1464, - "shareCount": 4366, - "sadCount": 364, - "angryCount": 2344, - "totalVideoTimeMS": 7138390, - "postCount": 17, - }, - "share": { - "loveCount": 27, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 323, - "wowCount": 2, - "thankfulCount": 0, - "interactionRate": 0.015380461670984783, - "likeCount": 179, - "hahaCount": 9, - "commentCount": 23, - "shareCount": 80, - "sadCount": 0, - "angryCount": 3, - "totalVideoTimeMS": 7091947, - "postCount": 1, - }, - "video": { - "shareCount": 636, - "loveCount": 265, - "totalInteractionCount": 3902, - "wowCount": 99, - "sadCount": 32, - "angryCount": 456, - "thankfulCount": 0, - "postCount": 15, - "interactionRate": 0.012380557382216852, - "likeCount": 1452, - "hahaCount": 499, - "commentCount": 463, - }, - }, - "subscriberData": {"initialCount": 2099948, "finalCount": 2100186}, - }, - { - "account": { - "id": 650861, - "name": "Daily Wire", - "handle": "DailyWire", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27655057_1815794295383060_2228253987427136016_n.png?_nc_cat=1&_nc_oc=AQm_uPD8ZwlgfmUIjiJBxewrWpNXIPkUpDdGdWdkYu9LXrRzIuUYx8pGdp5Kmcz1HU8&_nc_ht=scontent.xx&oh=ab8e2768dce63a6200349ce2d7dc8a11&oe=5DF6BB9F", - "subscriberCount": 1934601, - "url": "https://www.facebook.com/1435071773455316", - "platform": "Facebook", - "platformId": "1435071773455316", - "verified": True, - }, - "summary": { - "loveCount": 320, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 20071, - "wowCount": 506, - "thankfulCount": 0, - "interactionRate": 0.01364644339569, - "likeCount": 6269, - "hahaCount": 3786, - "commentCount": 4376, - "shareCount": 2066, - "sadCount": 427, - "angryCount": 2321, - "totalVideoTimeMS": 628754, - "postCount": 76, - }, - "breakdown": { - "native_video": { - "loveCount": 28, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 1360, - "wowCount": 41, - "thankfulCount": 0, - "interactionRate": 0.03514992995859545, - "likeCount": 766, - "hahaCount": 78, - "commentCount": 125, - "shareCount": 272, - "sadCount": 15, - 
"angryCount": 35, - "totalVideoTimeMS": 628754, - "postCount": 2, - }, - "owned_video": { - "loveCount": 28, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 1360, - "wowCount": 41, - "thankfulCount": 0, - "interactionRate": 0.03514992995859545, - "likeCount": 766, - "hahaCount": 78, - "commentCount": 125, - "shareCount": 272, - "sadCount": 15, - "angryCount": 35, - "totalVideoTimeMS": 628754, - "postCount": 2, - }, - "link": { - "shareCount": 1794, - "loveCount": 292, - "totalInteractionCount": 18711, - "wowCount": 465, - "sadCount": 412, - "angryCount": 2286, - "thankfulCount": 0, - "postCount": 74, - "interactionRate": 0.013026150514067726, - "likeCount": 5503, - "hahaCount": 3708, - "commentCount": 4251, - }, - }, - "subscriberData": {"initialCount": 1934539, "finalCount": 1934601}, - }, - { - "account": { - "id": 18752, - "name": "New York Daily News", - "handle": "NYDailyNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1", - "subscriberCount": 3120017, - "url": "https://www.facebook.com/268914272540", - "platform": "Facebook", - "platformId": "268914272540", - "verified": True, - }, - "summary": { - "shareCount": 4828, - "loveCount": 287, - "totalInteractionCount": 20002, - "wowCount": 1060, - "sadCount": 2842, - "angryCount": 2438, - "thankfulCount": 0, - "postCount": 61, - "interactionRate": 0.010481274817903877, - "likeCount": 4230, - "hahaCount": 1160, - "commentCount": 3157, - }, - "breakdown": { - "link": { - "shareCount": 4624, - "loveCount": 284, - "totalInteractionCount": 19262, - "wowCount": 1040, - "sadCount": 2833, - "angryCount": 2400, - "thankfulCount": 0, - "postCount": 59, - "interactionRate": 0.010449221989714567, - "likeCount": 3961, - "hahaCount": 1058, - "commentCount": 3062, - }, - "photo": { - "shareCount": 204, - "loveCount": 3, - "totalInteractionCount": 740, - "wowCount": 20, - "sadCount": 9, - "angryCount": 38, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.011859546430044143, - "likeCount": 269, - "hahaCount": 102, - "commentCount": 95, - }, - }, - "subscriberData": {"initialCount": 3119682, "finalCount": 3120017}, - }, - { - "account": { - "id": 6648, - "name": "Business Insider", - "handle": "businessinsider", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9", - "subscriberCount": 9107575, - "url": "https://www.facebook.com/20446254070", - "platform": "Facebook", - "platformId": "20446254070", - "verified": True, - }, - "summary": { - "loveCount": 698, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 19946, - "wowCount": 577, - "thankfulCount": 0, - "interactionRate": 0.0010211595794074279, - "likeCount": 8305, - "hahaCount": 1471, - "commentCount": 3626, - "shareCount": 4146, - "sadCount": 379, - "angryCount": 744, - "totalVideoTimeMS": 1519862, - "postCount": 213, - }, - "breakdown": { - "link": { - "shareCount": 3877, - "loveCount": 658, - "totalInteractionCount": 19022, - "wowCount": 538, - "sadCount": 379, - "angryCount": 744, - "thankfulCount": 0, - "postCount": 208, - "interactionRate": 0.0009991991583449024, - "likeCount": 7804, - "hahaCount": 1471, - 
"commentCount": 3551, - }, - "share": { - "loveCount": 40, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 924, - "wowCount": 39, - "thankfulCount": 0, - "interactionRate": 0.00202035873775233, - "likeCount": 501, - "hahaCount": 0, - "commentCount": 75, - "shareCount": 269, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 1519862, - "postCount": 5, - }, - }, - "subscriberData": {"initialCount": 9107012, "finalCount": 9107575}, - }, - { - "account": { - "id": 379565, - "name": "Chicks On The Right", - "handle": "ChicksOnTheRight", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64300377_10161966021395494_5710723125331623936_n.jpg?_nc_cat=1&_nc_oc=AQnx_5K6idsI6rYjNhjov9DcFdSjNH3jYlaOOOdv1Kp4cYJLSRoNvw8yqjeqQEzQtfE&_nc_ht=scontent.xx&oh=31f36fd2da3350e594e6e30d67309d20&oe=5DF779FF", - "subscriberCount": 1257694, - "url": "https://www.facebook.com/195530355493", - "platform": "Facebook", - "platformId": "195530355493", - "verified": True, - }, - "summary": { - "shareCount": 2639, - "loveCount": 434, - "totalInteractionCount": 19527, - "wowCount": 368, - "sadCount": 618, - "angryCount": 1640, - "thankfulCount": 0, - "postCount": 21, - "interactionRate": 0.07386425753103353, - "likeCount": 6224, - "hahaCount": 3694, - "commentCount": 3910, - }, - "breakdown": { - "link": { - "shareCount": 352, - "loveCount": 150, - "totalInteractionCount": 3459, - "wowCount": 143, - "sadCount": 192, - "angryCount": 507, - "thankfulCount": 0, - "postCount": 13, - "interactionRate": 0.02114950753848753, - "likeCount": 1110, - "hahaCount": 453, - "commentCount": 552, - }, - "photo": { - "shareCount": 2287, - "loveCount": 284, - "totalInteractionCount": 16068, - "wowCount": 225, - "sadCount": 426, - "angryCount": 1133, - "thankfulCount": 0, - "postCount": 8, - "interactionRate": 0.15965492908752993, - "likeCount": 5114, - "hahaCount": 3241, - "commentCount": 3358, - }, - }, - "subscriberData": {"initialCount": 1257731, "finalCount": 1257694}, - }, - { - "account": { - "id": 7777, - "name": "PBS NewsHour", - "handle": "newshour", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c2.0.200.200a/p200x200/303161_10150312469923675_881915800_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlncoeS4CvKUmO2uTUydTKWAioHD0iWx6bl9DqkBkwnCZgpb6CCkyZj7aidr38Ug1k&_nc_ht=scontent.xx&oh=0d6d1417f6b982eac877d479f2404a37&oe=5E0E2C5A", - "subscriberCount": 1417219, - "url": "https://www.facebook.com/6491828674", - "platform": "Facebook", - "platformId": "6491828674", - "verified": True, - }, - "summary": { - "shareCount": 4169, - "loveCount": 214, - "totalInteractionCount": 18916, - "wowCount": 579, - "sadCount": 3167, - "angryCount": 2882, - "thankfulCount": 0, - "postCount": 21, - "interactionRate": 0.06350568305301454, - "likeCount": 5238, - "hahaCount": 693, - "commentCount": 1974, - }, - "breakdown": { - "link": { - "shareCount": 4169, - "loveCount": 214, - "totalInteractionCount": 18916, - "wowCount": 579, - "sadCount": 3167, - "angryCount": 2882, - "thankfulCount": 0, - "postCount": 21, - "interactionRate": 0.06350568305301454, - "likeCount": 5238, - "hahaCount": 693, - "commentCount": 1974, - } - }, - "subscriberData": {"initialCount": 1417173, "finalCount": 1417219}, - }, - { - "account": { - "id": 2392855, - "name": "Alexandria Ocasio-Cortez", - "handle": "OcasioCortez", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22730535_1481259015298111_3425440004620635228_n.jpg?_nc_cat=1&_nc_oc=AQmDWxgsbW3C7swpQPubkVVpHgc5Qco0_dIf4b9uxBEeL7goF8cpcBerkfe1h4kNuyA&_nc_ht=scontent.xx&oh=22b0c36140209b2ef9d9e8033f843cf8&oe=5E05DF27", - "subscriberCount": 959516, - "url": "https://www.facebook.com/1316372698453411", - "platform": "Facebook", - "platformId": "1316372698453411", - "verified": True, - }, - "summary": { - "shareCount": 1550, - "loveCount": 476, - "totalInteractionCount": 18378, - "wowCount": 324, - "sadCount": 85, - "angryCount": 2460, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.9578215579167252, - "likeCount": 11326, - "hahaCount": 479, - "commentCount": 1678, - }, - "breakdown": { - "link": { - "shareCount": 1550, - "loveCount": 476, - "totalInteractionCount": 18378, - "wowCount": 324, - "sadCount": 85, - "angryCount": 2460, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.9578215579167252, - "likeCount": 11326, - "hahaCount": 479, - "commentCount": 1678, - } - }, - "subscriberData": {"initialCount": 959213, "finalCount": 959516}, - }, - { - "account": { - "id": 6892, - "name": "TheBlaze", - "handle": "TheBlaze", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/47350623_2141870595850269_7864140219111440384_n.png?_nc_cat=1&_nc_oc=AQmGyVQswjmmaInAkgMKbLJ62jAcb2BShbL78435-MqCEBLedhKr7VO97Nzxt2x220k&_nc_ht=scontent.xx&oh=4a5ce0b44b6400aab9bb78aa2afdee87&oe=5E011864", - "subscriberCount": 2089166, - "url": "https://www.facebook.com/140738092630206", - "platform": "Facebook", - "platformId": "140738092630206", - "verified": True, - }, - "summary": { - "loveCount": 444, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 17210, - "wowCount": 231, - "thankfulCount": 0, - "interactionRate": 0.03743126731405527, - "likeCount": 4069, - "hahaCount": 3750, - "commentCount": 3896, - "shareCount": 2670, - "sadCount": 420, - "angryCount": 1730, - "totalVideoTimeMS": 1284363, - "postCount": 22, - }, - "breakdown": { - "owned_video": { - "loveCount": 10, - "totalInteractionCount": 548, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.026230606762279146, - "likeCount": 265, - "hahaCount": 0, - "commentCount": 20, - "shareCount": 242, - "sadCount": 1, - "angryCount": 6, - "totalVideoTimeMS": 176169, - "postCount": 1, - }, - "crosspost": { - "loveCount": 10, - "totalInteractionCount": 548, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.026230606762279146, - "likeCount": 265, - "hahaCount": 0, - "commentCount": 20, - "shareCount": 242, - "sadCount": 1, - "angryCount": 6, - "totalVideoTimeMS": 176169, - "postCount": 1, - }, - "link": { - "shareCount": 2361, - "loveCount": 400, - "totalInteractionCount": 16005, - "wowCount": 224, - "sadCount": 419, - "angryCount": 1724, - "thankfulCount": 0, - "postCount": 17, - "interactionRate": 0.045041972560774954, - "likeCount": 3295, - "hahaCount": 3732, - "commentCount": 3850, - }, - "share": { - "loveCount": 34, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 657, - "wowCount": 3, - "thankfulCount": 0, - "interactionRate": 0.007850035600390107, - "likeCount": 509, - "hahaCount": 18, - "commentCount": 26, - "shareCount": 67, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 1108194, - "postCount": 4, - }, - }, - "subscriberData": {"initialCount": 2089159, "finalCount": 2089166}, - }, - { - "account": { - "id": 35336, - "name": "CNSNews.com", - "handle": "cnsnewscom", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22852132_10154846552035927_4321589519758565624_n.png?_nc_cat=1&_nc_oc=AQnmIVH1IVnR6t9S83Hf7HR7IsOdXqL8cDu8PCeK2_FzNcj6tfQmG0zCCYrZQZkrhjk&_nc_ht=scontent.xx&oh=8c7180ce5469f10781bebacf2fff223c&oe=5E092B8F", - "subscriberCount": 2316273, - "url": "https://www.facebook.com/19420215926", - "platform": "Facebook", - "platformId": "19420215926", - "verified": True, - }, - "summary": { - "loveCount": 620, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 15415, - "wowCount": 235, - "thankfulCount": 0, - "interactionRate": 0.06652921899834453, - "likeCount": 5723, - "hahaCount": 1227, - "commentCount": 3450, - "shareCount": 2027, - "sadCount": 499, - "angryCount": 1634, - "totalVideoTimeMS": 868494, - "postCount": 10, - }, - "breakdown": { - "owned_video": { - "loveCount": 44, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 1955, - "wowCount": 17, - "thankfulCount": 0, - "interactionRate": 0.021068305562097427, - "likeCount": 572, - "hahaCount": 402, - "commentCount": 384, - "shareCount": 345, - "sadCount": 14, - "angryCount": 177, - "totalVideoTimeMS": 868494, - "postCount": 4, - }, - "crosspost": { - "loveCount": 44, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 1955, - "wowCount": 17, - "thankfulCount": 0, - "interactionRate": 0.021068305562097427, - "likeCount": 572, - "hahaCount": 402, - "commentCount": 384, - "shareCount": 345, - "sadCount": 14, - "angryCount": 177, - "totalVideoTimeMS": 868494, - "postCount": 4, - }, - "link": { - "shareCount": 1682, - "loveCount": 576, - "totalInteractionCount": 13460, - "wowCount": 218, - "sadCount": 485, - "angryCount": 1457, - "thankfulCount": 0, - "postCount": 6, - "interactionRate": 0.09683649462250929, - "likeCount": 5151, - "hahaCount": 825, - "commentCount": 3066, - }, - }, - "subscriberData": {"initialCount": 2316278, "finalCount": 2316273}, - }, - { - "account": { - "id": 13844, - "name": "HuffPost Politics", - "handle": "HuffPostPolitics", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18838902_10155124699752911_6971495653588629046_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm5cko-OrQpOcPI-GqgP9V74INLYLzur0WIBNnrYmgNA33fLG0VMMxSWpg2i7235p0&_nc_ht=scontent.xx&oh=755100a2afdbaf29d5e08e613e66fc6e&oe=5DF42A6A", - "subscriberCount": 2107783, - "url": "https://www.facebook.com/56845382910", - "platform": "Facebook", - "platformId": "56845382910", - "verified": True, - }, - "summary": { - "shareCount": 2778, - "loveCount": 162, - "totalInteractionCount": 14995, - "wowCount": 532, - "sadCount": 619, - "angryCount": 2138, - "thankfulCount": 0, - "postCount": 18, - "interactionRate": 0.03951897859807728, - "likeCount": 2983, - "hahaCount": 2425, - "commentCount": 3358, - }, - "breakdown": { - "link": { - "shareCount": 2778, - "loveCount": 162, - "totalInteractionCount": 14995, - "wowCount": 532, - "sadCount": 619, - "angryCount": 2138, - "thankfulCount": 0, - "postCount": 18, - "interactionRate": 0.03951897859807728, - "likeCount": 2983, - "hahaCount": 2425, - "commentCount": 3358, - } - }, - "subscriberData": {"initialCount": 2107913, "finalCount": 2107783}, - }, - { - "account": { - "id": 10275, - "name": "POLITICO", - "handle": "politico", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12366404_10153208310706680_910786628287677974_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQma-VwdF4pnf7Qd94b720eyuT713ikAnUhD26cnrAU-j-98Nm8RizAuzhqPFP_BlLY&_nc_ht=scontent.xx&oh=706731dfa1a42c518d02de9a35c97541&oe=5E029DCC", - "subscriberCount": 1879882, - 
"url": "https://www.facebook.com/62317591679", - "platform": "Facebook", - "platformId": "62317591679", - "verified": True, - }, - "summary": { - "shareCount": 1794, - "loveCount": 132, - "totalInteractionCount": 14656, - "wowCount": 358, - "sadCount": 443, - "angryCount": 5090, - "thankfulCount": 0, - "postCount": 10, - "interactionRate": 0.0779381968718853, - "likeCount": 1825, - "hahaCount": 1630, - "commentCount": 3384, - }, - "breakdown": { - "link": { - "shareCount": 1794, - "loveCount": 132, - "totalInteractionCount": 14656, - "wowCount": 358, - "sadCount": 443, - "angryCount": 5090, - "thankfulCount": 0, - "postCount": 10, - "interactionRate": 0.0779381968718853, - "likeCount": 1825, - "hahaCount": 1630, - "commentCount": 3384, - } - }, - "subscriberData": {"initialCount": 1879507, "finalCount": 1879882}, - }, - { - "account": { - "id": 10342, - "name": "New York Post", - "handle": "NYPost", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12932928_10157483552025206_1176575955706691041_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnPmbZuC7S1v1NTPRZ7rWQU4EucwAW3nKx-aXD0PzlPsD3ifQpdaLcXEegH730Wy_o&_nc_ht=scontent.xx&oh=c77d86309611fa2972df1979bf6cab9e&oe=5E0827CA", - "subscriberCount": 4183079, - "url": "https://www.facebook.com/134486075205", - "platform": "Facebook", - "platformId": "134486075205", - "verified": True, - }, - "summary": { - "shareCount": 3305, - "loveCount": 374, - "totalInteractionCount": 13825, - "wowCount": 993, - "sadCount": 538, - "angryCount": 313, - "thankfulCount": 0, - "postCount": 29, - "interactionRate": 0.011379394140496551, - "likeCount": 3600, - "hahaCount": 2201, - "commentCount": 2501, - }, - "breakdown": { - "link": { - "shareCount": 3305, - "loveCount": 374, - "totalInteractionCount": 13825, - "wowCount": 993, - "sadCount": 538, - "angryCount": 313, - "thankfulCount": 0, - "postCount": 29, - "interactionRate": 0.011379394140496551, - "likeCount": 3600, - "hahaCount": 2201, - "commentCount": 2501, - } - }, - "subscriberData": {"initialCount": 4182920, "finalCount": 4183079}, - }, - { - "account": { - "id": 6646, - "name": "VICE", - "handle": "VICE", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13427861_1304295039603751_2178102892370936049_n.jpg?_nc_cat=1&_nc_oc=AQmzoEUjC5BCCMVSsIFvWa52KGr3Iqh9f0Y_eezqYMFw7h_EUam7WQdYxEFvJB6LoP0&_nc_ht=scontent.xx&oh=847f8eb6c5132c90382bc0940afbc692&oe=5E02C5BA", - "subscriberCount": 8177544, - "url": "https://www.facebook.com/167115176655082", - "platform": "Facebook", - "platformId": "167115176655082", - "verified": True, - }, - "summary": { - "loveCount": 660, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 13003, - "wowCount": 322, - "thankfulCount": 0, - "interactionRate": 0.00360819017583995, - "likeCount": 5818, - "hahaCount": 1403, - "commentCount": 2892, - "shareCount": 1641, - "sadCount": 92, - "angryCount": 175, - "totalVideoTimeMS": 1654965, - "postCount": 44, - }, - "breakdown": { - "owned_video": { - "loveCount": 116, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 2014, - "wowCount": 113, - "thankfulCount": 0, - "interactionRate": 0.004916923561653084, - "likeCount": 733, - "hahaCount": 266, - "commentCount": 398, - "shareCount": 339, - "sadCount": 17, - "angryCount": 32, - "totalVideoTimeMS": 1654965, - "postCount": 5, - }, - "crosspost": { - "loveCount": 116, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 2014, - "wowCount": 113, - "thankfulCount": 0, - "interactionRate": 0.004916923561653084, - "likeCount": 733, - "hahaCount": 266, 
- "commentCount": 398, - "shareCount": 339, - "sadCount": 17, - "angryCount": 32, - "totalVideoTimeMS": 1654965, - "postCount": 5, - }, - "link": { - "shareCount": 1191, - "loveCount": 472, - "totalInteractionCount": 9861, - "wowCount": 165, - "sadCount": 65, - "angryCount": 141, - "thankfulCount": 0, - "postCount": 36, - "interactionRate": 0.003339104806794259, - "likeCount": 4538, - "hahaCount": 1021, - "commentCount": 2268, - }, - "photo": { - "shareCount": 111, - "loveCount": 72, - "totalInteractionCount": 1128, - "wowCount": 44, - "sadCount": 10, - "angryCount": 2, - "thankfulCount": 0, - "postCount": 3, - "interactionRate": 0.0045989135800536315, - "likeCount": 547, - "hahaCount": 116, - "commentCount": 226, - }, - }, - "subscriberData": {"initialCount": 8174144, "finalCount": 8177544}, - }, - { - "account": { - "id": 13991, - "name": "Washington Examiner", - "handle": "WashingtonExaminer", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/36928610_10156017618514160_6905131952433528832_n.jpg?_nc_cat=111&_nc_oc=AQnKuEJBvxlMgc-zQHzSfEtsgFfHehn1pucacRbqrYlmmQp69EGwogOuyEUo-OV8OWM&_nc_ht=scontent.xx&oh=88b1063a5362110cc87fb9d6caedea35&oe=5DFE6885", - "subscriberCount": 714626, - "url": "https://www.facebook.com/40656699159", - "platform": "Facebook", - "platformId": "40656699159", - "verified": True, - }, - "summary": { - "shareCount": 1899, - "loveCount": 295, - "totalInteractionCount": 11994, - "wowCount": 117, - "sadCount": 75, - "angryCount": 1175, - "thankfulCount": 0, - "postCount": 20, - "interactionRate": 0.08381942301731732, - "likeCount": 3007, - "hahaCount": 1864, - "commentCount": 3562, - }, - "breakdown": { - "link": { - "shareCount": 1899, - "loveCount": 295, - "totalInteractionCount": 11994, - "wowCount": 117, - "sadCount": 75, - "angryCount": 1175, - "thankfulCount": 0, - "postCount": 20, - "interactionRate": 0.08381942301731732, - "likeCount": 3007, - "hahaCount": 1864, - "commentCount": 3562, - } - }, - "subscriberData": {"initialCount": 714637, "finalCount": 714626}, - }, - { - "account": { - "id": 1431632, - "name": "Axios", - "handle": "axiosnews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46844445_2289864377926961_9207563348864925696_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQncZ-V-nWa7ihCtPUY2OE7NX8kzbdrK9hiEMhqNa6qBeOKkh3VKYYgS2lvKd-xjZnI&_nc_ht=scontent.xx&oh=3fa348414b7b9cfcabc2cd5bc93789f4&oe=5E0F6422", - "subscriberCount": 339358, - "url": "https://www.facebook.com/1830665590513511", - "platform": "Facebook", - "platformId": "1830665590513511", - "verified": True, - }, - "summary": { - "shareCount": 2393, - "loveCount": 404, - "totalInteractionCount": 11534, - "wowCount": 376, - "sadCount": 820, - "angryCount": 1586, - "thankfulCount": 0, - "postCount": 26, - "interactionRate": 0.13054426349313464, - "likeCount": 3017, - "hahaCount": 847, - "commentCount": 2091, - }, - "breakdown": { - "link": { - "shareCount": 2393, - "loveCount": 404, - "totalInteractionCount": 11534, - "wowCount": 376, - "sadCount": 820, - "angryCount": 1586, - "thankfulCount": 0, - "postCount": 26, - "interactionRate": 0.13054426349313464, - "likeCount": 3017, - "hahaCount": 847, - "commentCount": 2091, - } - }, - "subscriberData": {"initialCount": 339339, "finalCount": 339358}, - }, - { - "account": { - "id": 44680, - "name": "NBC Nightly News with Lester Holt", - "handle": "nbcnightlynews", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15267620_10154852089253689_1498794491363762450_n.jpg?_nc_cat=1&_nc_oc=AQl8slY4bj3RknUwuA59x-FHZ63POsUD31th1TnRryuOSYs7o8qHGIfTo5RiNj1hZx4&_nc_ht=scontent.xx&oh=2eaaf6b66b32d649aa32cbc311d57be4&oe=5E129FF0", - "subscriberCount": 3494646, - "url": "https://www.facebook.com/114288853688", - "platform": "Facebook", - "platformId": "114288853688", - "verified": True, - }, - "summary": { - "loveCount": 691, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 11384, - "wowCount": 887, - "thankfulCount": 0, - "interactionRate": 0.009843653311681112, - "likeCount": 3937, - "hahaCount": 417, - "commentCount": 1173, - "shareCount": 1879, - "sadCount": 1939, - "angryCount": 461, - "totalVideoTimeMS": 939583, - "postCount": 33, - }, - "breakdown": { - "native_video": { - "loveCount": 104, - "totalInteractionCount": 870, - "wowCount": 16, - "thankfulCount": 0, - "interactionRate": 0.0049790571983503295, - "likeCount": 488, - "hahaCount": 7, - "commentCount": 93, - "shareCount": 159, - "sadCount": 2, - "angryCount": 1, - "totalVideoTimeMS": 479375, - "postCount": 5, - }, - "owned_video": { - "loveCount": 254, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 1714, - "wowCount": 32, - "thankfulCount": 0, - "interactionRate": 0.005436901538428521, - "likeCount": 981, - "hahaCount": 10, - "commentCount": 127, - "shareCount": 303, - "sadCount": 5, - "angryCount": 2, - "totalVideoTimeMS": 939583, - "postCount": 9, - }, - "crosspost": { - "loveCount": 150, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 844, - "wowCount": 16, - "thankfulCount": 0, - "interactionRate": 0.006037822234781147, - "likeCount": 493, - "hahaCount": 3, - "commentCount": 34, - "shareCount": 144, - "sadCount": 3, - "angryCount": 1, - "totalVideoTimeMS": 460208, - "postCount": 4, - }, - "link": { - "shareCount": 1576, - "loveCount": 437, - "totalInteractionCount": 9670, - "wowCount": 855, - "sadCount": 1934, - "angryCount": 459, - "thankfulCount": 0, - "postCount": 24, - "interactionRate": 0.011503339044464555, - "likeCount": 2956, - "hahaCount": 407, - "commentCount": 1046, - }, - }, - "subscriberData": {"initialCount": 3494629, "finalCount": 3494646}, - }, - { - "account": { - "id": 10343, - "name": "Bloomberg", - "handle": "bloombergbusiness", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31790536_10156383343951880_9143173959372505088_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm0CmNHVi4wKjfV2xKZ8WmMFbjVnwkn6rwlbqPewk5wTL0Plzu-cY8b0zLOAhS4DLw&_nc_ht=scontent.xx&oh=6eda22b5a7936ec78ea6929b3ed38430&oe=5E1356BD", - "subscriberCount": 2955809, - "url": "https://www.facebook.com/266790296879", - "platform": "Facebook", - "platformId": "266790296879", - "verified": True, - }, - "summary": { - "loveCount": 284, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 11343, - "wowCount": 436, - "thankfulCount": 0, - "interactionRate": 0.008526067860395113, - "likeCount": 4732, - "hahaCount": 812, - "commentCount": 1565, - "shareCount": 2536, - "sadCount": 711, - "angryCount": 267, - "totalVideoTimeMS": 645754, - "postCount": 45, - }, - "breakdown": { - "owned_video": { - "loveCount": 7, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 284, - "wowCount": 8, - "thankfulCount": 0, - "interactionRate": 0.0031803586463378594, - "likeCount": 186, - "hahaCount": 2, - "commentCount": 16, - "shareCount": 65, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 645754, - "postCount": 3, - }, - "crosspost": { - "loveCount": 7, - 
"threePlusMinuteVideoCount": 3, - "totalInteractionCount": 284, - "wowCount": 8, - "thankfulCount": 0, - "interactionRate": 0.0031803586463378594, - "likeCount": 186, - "hahaCount": 2, - "commentCount": 16, - "shareCount": 65, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 645754, - "postCount": 3, - }, - "link": { - "shareCount": 2304, - "loveCount": 260, - "totalInteractionCount": 10139, - "wowCount": 401, - "sadCount": 581, - "angryCount": 238, - "thankfulCount": 0, - "postCount": 40, - "interactionRate": 0.008559901463015728, - "likeCount": 4109, - "hahaCount": 793, - "commentCount": 1453, - }, - "photo": { - "shareCount": 167, - "loveCount": 17, - "totalInteractionCount": 920, - "wowCount": 27, - "sadCount": 130, - "angryCount": 29, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.01556345720548314, - "likeCount": 437, - "hahaCount": 17, - "commentCount": 96, - }, - }, - "subscriberData": {"initialCount": 2955474, "finalCount": 2955809}, - }, - { - "account": { - "id": 48738, - "name": "MRCTV", - "handle": "mrctv", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22788750_1672001339489574_3161704397036092004_n.png?_nc_cat=1&_nc_oc=AQnzYjYzWh8Ym4hvIw-6PRgWMQ8rhpJPlJayuXuhwN8Mr623OiX9U49qXT5nl-JMBTI&_nc_ht=scontent.xx&oh=d2d096127dd32ca06b4a0144b43f5673&oe=5E1233A4", - "subscriberCount": 3452690, - "url": "https://www.facebook.com/201956993160690", - "platform": "Facebook", - "platformId": "201956993160690", - "verified": True, - }, - "summary": { - "loveCount": 96, - "totalInteractionCount": 11058, - "wowCount": 156, - "thankfulCount": 0, - "interactionRate": 0.02667444405795624, - "likeCount": 1653, - "hahaCount": 3724, - "commentCount": 2709, - "shareCount": 1490, - "sadCount": 131, - "angryCount": 1099, - "totalVideoTimeMS": 55337, - "postCount": 12, - }, - "breakdown": { - "owned_video": { - "loveCount": 3, - "totalInteractionCount": 1529, - "wowCount": 13, - "thankfulCount": 0, - "interactionRate": 0.04428363188340401, - "likeCount": 231, - "hahaCount": 455, - "commentCount": 139, - "shareCount": 517, - "sadCount": 7, - "angryCount": 164, - "totalVideoTimeMS": 55337, - "postCount": 1, - }, - "crosspost": { - "loveCount": 3, - "totalInteractionCount": 1529, - "wowCount": 13, - "thankfulCount": 0, - "interactionRate": 0.04428363188340401, - "likeCount": 231, - "hahaCount": 455, - "commentCount": 139, - "shareCount": 517, - "sadCount": 7, - "angryCount": 164, - "totalVideoTimeMS": 55337, - "postCount": 1, - }, - "link": { - "shareCount": 973, - "loveCount": 93, - "totalInteractionCount": 9529, - "wowCount": 143, - "sadCount": 124, - "angryCount": 935, - "thankfulCount": 0, - "postCount": 11, - "interactionRate": 0.02508150765927264, - "likeCount": 1422, - "hahaCount": 3269, - "commentCount": 2570, - }, - }, - "subscriberData": {"initialCount": 3452796, "finalCount": 3452690}, - }, - { - "account": { - "id": 10323, - "name": "Reuters", - "handle": "Reuters", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/51325614_2292147310805612_3874403780548100096_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlLN3v5RKOKT6LVQj--bvulAczkWupv1AuwaG14c3MkOAyF9oLoLGad6n1Rl6FhN6k&_nc_ht=scontent.xx&oh=73deaf953fbb14e82a9c92b2f850db23&oe=5E0ACADC", - "subscriberCount": 4154522, - "url": "https://www.facebook.com/114050161948682", - "platform": "Facebook", - "platformId": "114050161948682", - "verified": True, - }, - "summary": { - "shareCount": 1261, - "loveCount": 737, - "totalInteractionCount": 10822, - "wowCount": 338, - "sadCount": 383, - 
"angryCount": 226, - "thankfulCount": 0, - "postCount": 65, - "interactionRate": 0.003995766413272492, - "likeCount": 5427, - "hahaCount": 561, - "commentCount": 1889, - }, - "breakdown": { - "native_video": { - "shareCount": 517, - "loveCount": 257, - "totalInteractionCount": 3437, - "wowCount": 136, - "sadCount": 69, - "angryCount": 47, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.041353775289169524, - "likeCount": 1610, - "hahaCount": 22, - "commentCount": 779, - }, - "owned_video": { - "shareCount": 517, - "loveCount": 257, - "totalInteractionCount": 3437, - "wowCount": 136, - "sadCount": 69, - "angryCount": 47, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.041353775289169524, - "likeCount": 1610, - "hahaCount": 22, - "commentCount": 779, - }, - "link": { - "shareCount": 744, - "loveCount": 480, - "totalInteractionCount": 7385, - "wowCount": 202, - "sadCount": 314, - "angryCount": 179, - "thankfulCount": 0, - "postCount": 63, - "interactionRate": 0.0028162931948968765, - "likeCount": 3817, - "hahaCount": 539, - "commentCount": 1110, - }, - }, - "subscriberData": {"initialCount": 4154272, "finalCount": 4154522}, - }, - { - "account": { - "id": 3927, - "name": "Daily Kos", - "handle": "dailykos", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/69689928_10157802442554255_4623408062214963200_n.jpg?_nc_cat=1&_nc_oc=AQmiFQCe9HEd-7S8-YBXHpZ33b_hLFcVPCNB05lp1enuXRMKyX6Abs1TL5YJfZH8Z8M&_nc_ht=scontent.xx&oh=8492c84250d2d1c80f95be3523063b3b&oe=5DF8500E", - "subscriberCount": 1321301, - "url": "https://www.facebook.com/43179984254", - "platform": "Facebook", - "platformId": "43179984254", - "verified": True, - }, - "summary": { - "shareCount": 2484, - "loveCount": 1084, - "totalInteractionCount": 10447, - "wowCount": 361, - "sadCount": 62, - "angryCount": 2277, - "thankfulCount": 0, - "postCount": 12, - "interactionRate": 0.06584419447196362, - "likeCount": 3305, - "hahaCount": 203, - "commentCount": 671, - }, - "breakdown": { - "link": { - "shareCount": 2484, - "loveCount": 1084, - "totalInteractionCount": 10447, - "wowCount": 361, - "sadCount": 62, - "angryCount": 2277, - "thankfulCount": 0, - "postCount": 12, - "interactionRate": 0.06584419447196362, - "likeCount": 3305, - "hahaCount": 203, - "commentCount": 671, - } - }, - "subscriberData": {"initialCount": 1321301, "finalCount": 1321301}, - }, - { - "account": { - "id": 7202, - "name": "Washington Free Beacon", - "handle": "FreeBeacon", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/24176871_1495617353820616_2845320948710215498_n.png?_nc_cat=106&_nc_oc=AQnbg3-OcKtFvYeoc0i6GQ70lXyqvHbqPfUtkh_KFVS0k6oj3r-LxtkTIuKuAo2QRgs&_nc_ht=scontent.xx&oh=8831d07df2000939df5c7d0225ca20ff&oe=5DF909CE", - "subscriberCount": 740989, - "url": "https://www.facebook.com/282024895179874", - "platform": "Facebook", - "platformId": "282024895179874", - "verified": True, - }, - "summary": { - "shareCount": 2690, - "loveCount": 10, - "totalInteractionCount": 10222, - "wowCount": 90, - "sadCount": 60, - "angryCount": 3237, - "thankfulCount": 0, - "postCount": 7, - "interactionRate": 0.19703183683604658, - "likeCount": 553, - "hahaCount": 880, - "commentCount": 2702, - }, - "breakdown": { - "link": { - "shareCount": 2690, - "loveCount": 10, - "totalInteractionCount": 10222, - "wowCount": 90, - "sadCount": 60, - "angryCount": 3237, - "thankfulCount": 0, - "postCount": 7, - "interactionRate": 0.19703183683604658, - "likeCount": 553, - "hahaCount": 880, - "commentCount": 2702, - } - }, - 
"subscriberData": {"initialCount": 741005, "finalCount": 740989}, - }, - { - "account": { - "id": 7199, - "name": "Sean Hannity", - "handle": "SeanHannity", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/10156129_10154243847245389_4273859834170256486_n.jpg?_nc_cat=1&_nc_oc=AQnKVAkiwo1ZI_RMY4AghlSvZHwn_Go__As4BkMehqcDTXHydiCFDIgYZxe2FAsrjWA&_nc_ht=scontent.xx&oh=f15e3e465f2a2934e585dfce76c17331&oe=5E0C747A", - "subscriberCount": 3206935, - "url": "https://www.facebook.com/69813760388", - "platform": "Facebook", - "platformId": "69813760388", - "verified": True, - }, - "summary": { - "loveCount": 362, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 9944, - "wowCount": 202, - "thankfulCount": 0, - "interactionRate": 0.028188294062528745, - "likeCount": 3629, - "hahaCount": 1594, - "commentCount": 1923, - "shareCount": 881, - "sadCount": 56, - "angryCount": 1297, - "totalVideoTimeMS": 1004775, - "postCount": 11, - }, - "breakdown": { - "native_video": { - "loveCount": 210, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 2456, - "wowCount": 7, - "thankfulCount": 0, - "interactionRate": 0.0765823564353657, - "likeCount": 1801, - "hahaCount": 20, - "commentCount": 172, - "shareCount": 241, - "sadCount": 1, - "angryCount": 4, - "totalVideoTimeMS": 498857, - "postCount": 1, - }, - "owned_video": { - "loveCount": 221, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 3779, - "wowCount": 45, - "thankfulCount": 0, - "interactionRate": 0.05890230916384602, - "likeCount": 2420, - "hahaCount": 334, - "commentCount": 312, - "shareCount": 430, - "sadCount": 11, - "angryCount": 6, - "totalVideoTimeMS": 1004775, - "postCount": 2, - }, - "crosspost": { - "loveCount": 11, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 1323, - "wowCount": 38, - "thankfulCount": 0, - "interactionRate": 0.04125344363354594, - "likeCount": 619, - "hahaCount": 314, - "commentCount": 140, - "shareCount": 189, - "sadCount": 10, - "angryCount": 2, - "totalVideoTimeMS": 505918, - "postCount": 1, - }, - "link": { - "shareCount": 451, - "loveCount": 141, - "totalInteractionCount": 6165, - "wowCount": 157, - "sadCount": 45, - "angryCount": 1291, - "thankfulCount": 0, - "postCount": 9, - "interactionRate": 0.02135949273543384, - "likeCount": 1209, - "hahaCount": 1260, - "commentCount": 1611, - }, - }, - "subscriberData": {"initialCount": 3207075, "finalCount": 3206935}, - }, - { - "account": { - "id": 59767, - "name": "AJ+", - "handle": "ajplusenglish", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/10675686_452503314891181_8657239428083336114_n.png?_nc_cat=1&_nc_oc=AQlFJD0M_GXyy5E5WBzjW8pOgbfwUdlw6gX0sO4_XgBSfMLbj_1QRzoAuC_OSE3H0tM&_nc_ht=scontent.xx&oh=5d0c3ababdcf031637161a928fbc1086&oe=5DCA0751", - "subscriberCount": 11131099, - "url": "https://www.facebook.com/407570359384477", - "platform": "Facebook", - "platformId": "407570359384477", - "verified": True, - }, - "summary": { - "loveCount": 173, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 9604, - "wowCount": 66, - "thankfulCount": 0, - "interactionRate": 0.014373987251531032, - "likeCount": 2120, - "hahaCount": 121, - "commentCount": 560, - "shareCount": 3416, - "sadCount": 2136, - "angryCount": 1012, - "totalVideoTimeMS": 1831347, - "postCount": 6, - }, - "breakdown": { - "native_video": { - "loveCount": 8, - "totalInteractionCount": 7586, - "wowCount": 40, - "thankfulCount": 0, - "interactionRate": 0.03407533352816075, - "likeCount": 1155, - "hahaCount": 28, - "commentCount": 343, - 
"shareCount": 3052, - "sadCount": 2079, - "angryCount": 881, - "totalVideoTimeMS": 193318, - "postCount": 2, - }, - "owned_video": { - "loveCount": 173, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 9604, - "wowCount": 66, - "thankfulCount": 0, - "interactionRate": 0.014373987251531032, - "likeCount": 2120, - "hahaCount": 121, - "commentCount": 560, - "shareCount": 3416, - "sadCount": 2136, - "angryCount": 1012, - "totalVideoTimeMS": 1831347, - "postCount": 6, - }, - "crosspost": { - "loveCount": 165, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 2018, - "wowCount": 26, - "thankfulCount": 0, - "interactionRate": 0.004527805984232275, - "likeCount": 965, - "hahaCount": 93, - "commentCount": 217, - "shareCount": 364, - "sadCount": 57, - "angryCount": 131, - "totalVideoTimeMS": 1638029, - "postCount": 4, - }, - }, - "subscriberData": {"initialCount": 11131339, "finalCount": 11131099}, - }, - { - "account": { - "id": 10284, - "name": "The New Yorker", - "handle": "newyorker", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1174822_10151878824588869_2070117374_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQno9Opk1N_2uuxM9xMCbaLh-8w7vk3rWYzY5iX2B0axGmTGyU1kkZY1RTndOiqUuAE&_nc_ht=scontent.xx&oh=e4a5a2194344ddb52a1e83254332bea3&oe=5DC7CED7", - "subscriberCount": 4287325, - "url": "https://www.facebook.com/9258148868", - "platform": "Facebook", - "platformId": "9258148868", - "verified": True, - }, - "summary": { - "shareCount": 2613, - "loveCount": 230, - "totalInteractionCount": 9559, - "wowCount": 653, - "sadCount": 413, - "angryCount": 921, - "thankfulCount": 0, - "postCount": 26, - "interactionRate": 0.008560272893102834, - "likeCount": 3747, - "hahaCount": 322, - "commentCount": 660, - }, - "breakdown": { - "link": { - "shareCount": 2613, - "loveCount": 230, - "totalInteractionCount": 9559, - "wowCount": 653, - "sadCount": 413, - "angryCount": 921, - "thankfulCount": 0, - "postCount": 26, - "interactionRate": 0.008560272893102834, - "likeCount": 3747, - "hahaCount": 322, - "commentCount": 660, - } - }, - "subscriberData": {"initialCount": 4287168, "finalCount": 4287325}, - }, - { - "account": { - "id": 16406, - "name": "TIME", - "handle": "time", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10372522_10152195008896491_2022604163270194960_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkWdxZsjSYaIb0mLyEuq2eqxcvi6f98JCGl-qbaXtMNC9m3vw71t4X881vCIALzL7I&_nc_ht=scontent.xx&oh=01fa11e373104495d36fd77682a53514&oe=5E0D29D6", - "subscriberCount": 12917303, - "url": "https://www.facebook.com/10606591490", - "platform": "Facebook", - "platformId": "10606591490", - "verified": True, - }, - "summary": { - "loveCount": 627, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 9096, - "wowCount": 211, - "thankfulCount": 0, - "interactionRate": 0.002601108714848309, - "likeCount": 4794, - "hahaCount": 202, - "commentCount": 787, - "shareCount": 1858, - "sadCount": 576, - "angryCount": 41, - "totalVideoTimeMS": 846327, - "postCount": 27, - }, - "breakdown": { - "native_video": { - "loveCount": 8, - "totalInteractionCount": 375, - "wowCount": 7, - "thankfulCount": 0, - "interactionRate": 0.0014476408621328387, - "likeCount": 127, - "hahaCount": 5, - "commentCount": 38, - "shareCount": 53, - "sadCount": 137, - "angryCount": 0, - "totalVideoTimeMS": 208401, - "postCount": 2, - }, - "owned_video": { - "loveCount": 34, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 1204, - "wowCount": 91, - "thankfulCount": 0, - "interactionRate": 
0.001331519937362825, - "likeCount": 474, - "hahaCount": 15, - "commentCount": 163, - "shareCount": 249, - "sadCount": 168, - "angryCount": 10, - "totalVideoTimeMS": 846327, - "postCount": 7, - }, - "crosspost": { - "loveCount": 26, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 829, - "wowCount": 84, - "thankfulCount": 0, - "interactionRate": 0.0012773301724701518, - "likeCount": 347, - "hahaCount": 10, - "commentCount": 125, - "shareCount": 196, - "sadCount": 31, - "angryCount": 10, - "totalVideoTimeMS": 637926, - "postCount": 5, - }, - "link": { - "shareCount": 1609, - "loveCount": 593, - "totalInteractionCount": 7892, - "wowCount": 120, - "sadCount": 408, - "angryCount": 31, - "thankfulCount": 0, - "postCount": 20, - "interactionRate": 0.003050109623959029, - "likeCount": 4320, - "hahaCount": 187, - "commentCount": 624, - }, - }, - "subscriberData": {"initialCount": 12917834, "finalCount": 12917303}, - }, - { - "account": { - "id": 765761, - "name": "The Blacksphere", - "handle": "theblacksphere.net", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18119422_10154519215947596_8821577492859645295_n.png?_nc_cat=105&_nc_oc=AQkg0Hf-P17_kRiBglxNNjSQbh-fwZuFfcRNPqUcPH7EQalrULeQ4iA16aZvUqiWsvg&_nc_ht=scontent.xx&oh=44f59681d9adc96cbc0688df3addc493&oe=5E13F29A", - "subscriberCount": 577927, - "url": "https://www.facebook.com/49867377595", - "platform": "Facebook", - "platformId": "49867377595", - "verified": False, - }, - "summary": { - "shareCount": 2420, - "loveCount": 41, - "totalInteractionCount": 8827, - "wowCount": 243, - "sadCount": 123, - "angryCount": 1467, - "thankfulCount": 0, - "postCount": 19, - "interactionRate": 0.08028452559025996, - "likeCount": 1529, - "hahaCount": 1682, - "commentCount": 1322, - }, - "breakdown": { - "link": { - "shareCount": 1780, - "loveCount": 6, - "totalInteractionCount": 6313, - "wowCount": 240, - "sadCount": 121, - "angryCount": 1466, - "thankfulCount": 0, - "postCount": 18, - "interactionRate": 0.06055944818230816, - "likeCount": 535, - "hahaCount": 878, - "commentCount": 1287, - }, - "photo": { - "shareCount": 640, - "loveCount": 35, - "totalInteractionCount": 2514, - "wowCount": 3, - "sadCount": 2, - "angryCount": 1, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.4349898649437792, - "likeCount": 994, - "hahaCount": 804, - "commentCount": 35, - }, - }, - "subscriberData": {"initialCount": 577962, "finalCount": 577927}, - }, - { - "account": { - "id": 16340, - "name": "NewsBusters.org", - "handle": "newsbusters", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22894025_10156065135861178_6465017462954076657_n.png?_nc_cat=1&_nc_oc=AQkwnX53WKVe23PNYVstnqmppMCnDaKdcHy8l_Lt9nOPecFbnoIjtVzWaEHsJ9zjqHo&_nc_ht=scontent.xx&oh=6f2998202b70329d6ff64e16c4e48a8f&oe=5E13E3DA", - "subscriberCount": 2919313, - "url": "https://www.facebook.com/6333396177", - "platform": "Facebook", - "platformId": "6333396177", - "verified": True, - }, - "summary": { - "loveCount": 249, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 8748, - "wowCount": 165, - "thankfulCount": 0, - "interactionRate": 0.029938181423324176, - "likeCount": 3436, - "hahaCount": 1751, - "commentCount": 1661, - "shareCount": 1165, - "sadCount": 79, - "angryCount": 242, - "totalVideoTimeMS": 386345, - "postCount": 10, - }, - "breakdown": { - "owned_video": { - "loveCount": 6, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 815, - "wowCount": 12, - "thankfulCount": 0, - "interactionRate": 0.027917182906189015, - 
"likeCount": 170, - "hahaCount": 276, - "commentCount": 186, - "shareCount": 122, - "sadCount": 6, - "angryCount": 37, - "totalVideoTimeMS": 386345, - "postCount": 1, - }, - "crosspost": { - "loveCount": 6, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 815, - "wowCount": 12, - "thankfulCount": 0, - "interactionRate": 0.027917182906189015, - "likeCount": 170, - "hahaCount": 276, - "commentCount": 186, - "shareCount": 122, - "sadCount": 6, - "angryCount": 37, - "totalVideoTimeMS": 386345, - "postCount": 1, - }, - "link": { - "shareCount": 1043, - "loveCount": 243, - "totalInteractionCount": 7933, - "wowCount": 153, - "sadCount": 73, - "angryCount": 205, - "thankfulCount": 0, - "postCount": 9, - "interactionRate": 0.030177960908408005, - "likeCount": 3266, - "hahaCount": 1475, - "commentCount": 1475, - }, - }, - "subscriberData": {"initialCount": 2919385, "finalCount": 2919313}, - }, - { - "account": { - "id": 44528, - "name": "Vox", - "handle": "Vox", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15327441_612869972233942_727410529402189533_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnoAo-srh87mkvD-DKqEDzFi4nn14JVBUE8HPqgKgoKz2LtUzKnd7p6NTRpO6WA_Gg&_nc_ht=scontent.xx&oh=ffdab33a30a7adbfde40574c198f8580&oe=5DF8E26D", - "subscriberCount": 2426279, - "url": "https://www.facebook.com/223649167822693", - "platform": "Facebook", - "platformId": "223649167822693", - "verified": True, - }, - "summary": { - "loveCount": 319, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 8598, - "wowCount": 288, - "thankfulCount": 0, - "interactionRate": 0.016857786844620047, - "likeCount": 3431, - "hahaCount": 163, - "commentCount": 653, - "shareCount": 1691, - "sadCount": 329, - "angryCount": 1724, - "totalVideoTimeMS": 265115, - "postCount": 21, - }, - "breakdown": { - "owned_video": { - "loveCount": 47, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 761, - "wowCount": 20, - "thankfulCount": 0, - "interactionRate": 0.01566249144487926, - "likeCount": 443, - "hahaCount": 5, - "commentCount": 54, - "shareCount": 172, - "sadCount": 19, - "angryCount": 1, - "totalVideoTimeMS": 265115, - "postCount": 2, - }, - "crosspost": { - "loveCount": 47, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 761, - "wowCount": 20, - "thankfulCount": 0, - "interactionRate": 0.01566249144487926, - "likeCount": 443, - "hahaCount": 5, - "commentCount": 54, - "shareCount": 172, - "sadCount": 19, - "angryCount": 1, - "totalVideoTimeMS": 265115, - "postCount": 2, - }, - "link": { - "shareCount": 1519, - "loveCount": 272, - "totalInteractionCount": 7837, - "wowCount": 268, - "sadCount": 310, - "angryCount": 1723, - "thankfulCount": 0, - "postCount": 19, - "interactionRate": 0.016981438092869096, - "likeCount": 2988, - "hahaCount": 158, - "commentCount": 599, - }, - }, - "subscriberData": {"initialCount": 2426078, "finalCount": 2426279}, - }, - { - "account": { - "id": 327932, - "name": "Media Research Center", - "handle": "mediaresearchcenter", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22814117_10155267787348717_9035099093135610710_n.png?_nc_cat=1&_nc_oc=AQnlBU3OCfeS-5QWg2v10Je2qqRgOr8VQS088-pc6gM4VZ_wGRCLBF_h5ObNobn7SOE&_nc_ht=scontent.xx&oh=4444f21775a6df49bc0c533f492d5953&oe=5E0B587B", - "subscriberCount": 1853272, - "url": "https://www.facebook.com/10498053716", - "platform": "Facebook", - "platformId": "10498053716", - "verified": True, - }, - "summary": { - "loveCount": 195, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 8478, - 
"wowCount": 155, - "thankfulCount": 0, - "interactionRate": 0.03809474475981425, - "likeCount": 2225, - "hahaCount": 1599, - "commentCount": 1820, - "shareCount": 1380, - "sadCount": 405, - "angryCount": 699, - "totalVideoTimeMS": 386345, - "postCount": 12, - }, - "breakdown": { - "owned_video": { - "loveCount": 4, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 511, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.027572825173180004, - "likeCount": 124, - "hahaCount": 162, - "commentCount": 98, - "shareCount": 78, - "sadCount": 5, - "angryCount": 36, - "totalVideoTimeMS": 386345, - "postCount": 1, - }, - "crosspost": { - "loveCount": 4, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 511, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.027572825173180004, - "likeCount": 124, - "hahaCount": 162, - "commentCount": 98, - "shareCount": 78, - "sadCount": 5, - "angryCount": 36, - "totalVideoTimeMS": 386345, - "postCount": 1, - }, - "link": { - "shareCount": 1302, - "loveCount": 191, - "totalInteractionCount": 7967, - "wowCount": 151, - "sadCount": 400, - "angryCount": 663, - "thankfulCount": 0, - "postCount": 11, - "interactionRate": 0.03906599887550357, - "likeCount": 2101, - "hahaCount": 1437, - "commentCount": 1722, - }, - }, - "subscriberData": {"initialCount": 1853276, "finalCount": 1853272}, - }, - { - "account": { - "id": 546413, - "name": "PJ Media", - "handle": "PJMedia", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/11233498_10153918103746159_4425260475851381266_n.jpg?_nc_cat=1&_nc_oc=AQlsQcaTBN0IYmuAz9KhN7jR3MPlfGRQ6pQx6vtSV9AWa6eNztotI3-NTLX1xGzJ6zE&_nc_ht=scontent.xx&oh=15f625aebc03c1c0e428efec7e19fab3&oe=5E04568A", - "subscriberCount": 345163, - "url": "https://www.facebook.com/15418366158", - "platform": "Facebook", - "platformId": "15418366158", - "verified": True, - }, - "summary": { - "shareCount": 2584, - "loveCount": 46, - "totalInteractionCount": 8327, - "wowCount": 270, - "sadCount": 158, - "angryCount": 1748, - "thankfulCount": 0, - "postCount": 17, - "interactionRate": 0.14167568436743544, - "likeCount": 1001, - "hahaCount": 842, - "commentCount": 1678, - }, - "breakdown": { - "link": { - "shareCount": 2584, - "loveCount": 46, - "totalInteractionCount": 8327, - "wowCount": 270, - "sadCount": 158, - "angryCount": 1748, - "thankfulCount": 0, - "postCount": 17, - "interactionRate": 0.14167568436743544, - "likeCount": 1001, - "hahaCount": 842, - "commentCount": 1678, - } - }, - "subscriberData": {"initialCount": 345146, "finalCount": 345163}, - }, - { - "account": { - "id": 1165716, - "name": "Don Lemon CNN", - "handle": "donlemoncnn", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18057083_1290293507691218_2466909210169106234_n.jpg?_nc_cat=1&_nc_oc=AQnVGMIs0e2R-6KqoSknArDh_vaQ99IJS6k-qf8dfQ8_11ygvd9iWteTgaDp-e325os&_nc_ht=scontent.xx&oh=041b801f1ae88fb46a42c942efe1775f&oe=5E089245", - "subscriberCount": 638421, - "url": "https://www.facebook.com/1009804299073475", - "platform": "Facebook", - "platformId": "1009804299073475", - "verified": True, - }, - "summary": { - "loveCount": 606, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 7887, - "wowCount": 170, - "thankfulCount": 0, - "interactionRate": 0.24702226176527717, - "likeCount": 3047, - "hahaCount": 345, - "commentCount": 1274, - "shareCount": 1548, - "sadCount": 412, - "angryCount": 485, - "totalVideoTimeMS": 1615012, - "postCount": 5, - }, - "breakdown": { - "owned_video": { - "loveCount": 606, - 
"threePlusMinuteVideoCount": 5, - "totalInteractionCount": 7887, - "wowCount": 170, - "thankfulCount": 0, - "interactionRate": 0.24702226176527717, - "likeCount": 3047, - "hahaCount": 345, - "commentCount": 1274, - "shareCount": 1548, - "sadCount": 412, - "angryCount": 485, - "totalVideoTimeMS": 1615012, - "postCount": 5, - }, - "crosspost": { - "loveCount": 606, - "threePlusMinuteVideoCount": 5, - "totalInteractionCount": 7887, - "wowCount": 170, - "thankfulCount": 0, - "interactionRate": 0.24702226176527717, - "likeCount": 3047, - "hahaCount": 345, - "commentCount": 1274, - "shareCount": 1548, - "sadCount": 412, - "angryCount": 485, - "totalVideoTimeMS": 1615012, - "postCount": 5, - }, - }, - "subscriberData": {"initialCount": 638387, "finalCount": 638421}, - }, - { - "account": { - "id": 3998, - "name": "The Daily Show", - "handle": "thedailyshow", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/48368386_10157135026436800_3277713629201301504_n.jpg?_nc_cat=1&_nc_oc=AQnBJgtgBUq5JxUf7vm9iBepQKcPv9l_LwUxs4ocWc-OZKl6FlGJg8q-djlalcukEM8&_nc_ht=scontent.xx&oh=b8d75b4499115eea65b8a9e51bec6c43&oe=5DFF9198", - "subscriberCount": 8586527, - "url": "https://www.facebook.com/7976226799", - "platform": "Facebook", - "platformId": "7976226799", - "verified": True, - }, - "summary": { - "loveCount": 335, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 7864, - "wowCount": 117, - "thankfulCount": 0, - "interactionRate": 0.015257449959494964, - "likeCount": 3817, - "hahaCount": 2383, - "commentCount": 398, - "shareCount": 720, - "sadCount": 75, - "angryCount": 19, - "totalVideoTimeMS": 842933, - "postCount": 6, - }, - "breakdown": { - "native_video": { - "loveCount": 142, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 3132, - "wowCount": 46, - "thankfulCount": 0, - "interactionRate": 0.012159372334131864, - "likeCount": 1621, - "hahaCount": 879, - "commentCount": 184, - "shareCount": 240, - "sadCount": 15, - "angryCount": 5, - "totalVideoTimeMS": 357647, - "postCount": 3, - }, - "owned_video": { - "loveCount": 327, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 5907, - "wowCount": 68, - "thankfulCount": 0, - "interactionRate": 0.013754998780277522, - "likeCount": 3013, - "hahaCount": 1567, - "commentCount": 279, - "shareCount": 577, - "sadCount": 59, - "angryCount": 17, - "totalVideoTimeMS": 842933, - "postCount": 5, - }, - "crosspost": { - "loveCount": 185, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 2775, - "wowCount": 22, - "thankfulCount": 0, - "interactionRate": 0.01615426190367902, - "likeCount": 1392, - "hahaCount": 688, - "commentCount": 95, - "shareCount": 337, - "sadCount": 44, - "angryCount": 12, - "totalVideoTimeMS": 485286, - "postCount": 2, - }, - "photo": { - "shareCount": 143, - "loveCount": 8, - "totalInteractionCount": 1957, - "wowCount": 49, - "sadCount": 16, - "angryCount": 2, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.022792999672314233, - "likeCount": 804, - "hahaCount": 816, - "commentCount": 119, - }, - }, - "subscriberData": {"initialCount": 8585412, "finalCount": 8586527}, - }, - { - "account": { - "id": 28285, - "name": "Townhall.com", - "handle": "townhallcom", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/52313832_10158468618829657_401031722176610304_n.png?_nc_cat=1&_nc_oc=AQnJFJyepGbGHljkkVb93rI6SAbiEecWjNxe3C4rS2WFLS18RWTwjScIUwrrUrlWx9o&_nc_ht=scontent.xx&oh=ff685a6018b55bd19ae00229c9d617dc&oe=5DFD7EC4", - "subscriberCount": 1286652, - "url": 
"https://www.facebook.com/41632789656", - "platform": "Facebook", - "platformId": "41632789656", - "verified": True, - }, - "summary": { - "shareCount": 1371, - "loveCount": 144, - "totalInteractionCount": 7605, - "wowCount": 153, - "sadCount": 54, - "angryCount": 1036, - "thankfulCount": 0, - "postCount": 17, - "interactionRate": 0.03474032963676537, - "likeCount": 2585, - "hahaCount": 792, - "commentCount": 1470, - }, - "breakdown": { - "link": { - "shareCount": 1371, - "loveCount": 144, - "totalInteractionCount": 7605, - "wowCount": 153, - "sadCount": 54, - "angryCount": 1036, - "thankfulCount": 0, - "postCount": 17, - "interactionRate": 0.03474032963676537, - "likeCount": 2585, - "hahaCount": 792, - "commentCount": 1470, - } - }, - "subscriberData": {"initialCount": 1286726, "finalCount": 1286652}, - }, - { - "account": { - "id": 13493, - "name": "The Washington Times", - "handle": "TheWashingtonTimes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/68701924_10158091236369411_4030630617484886016_n.png?_nc_cat=1&_nc_oc=AQn1GXVtNdWnXaGIEHrhLGQyOlmFDwbRgfVrqynMP9L2fBaUV5SpEChCSozGqc-wcGU&_nc_ht=scontent.xx&oh=6c68fde34ca8ee23b09a81bac28e0e22&oe=5DFDF501", - "subscriberCount": 652390, - "url": "https://www.facebook.com/35994014410", - "platform": "Facebook", - "platformId": "35994014410", - "verified": True, - }, - "summary": { - "shareCount": 1562, - "loveCount": 344, - "totalInteractionCount": 7423, - "wowCount": 118, - "sadCount": 74, - "angryCount": 810, - "thankfulCount": 0, - "postCount": 22, - "interactionRate": 0.05165621790646699, - "likeCount": 2254, - "hahaCount": 445, - "commentCount": 1816, - }, - "breakdown": { - "link": { - "shareCount": 1562, - "loveCount": 344, - "totalInteractionCount": 7423, - "wowCount": 118, - "sadCount": 74, - "angryCount": 810, - "thankfulCount": 0, - "postCount": 22, - "interactionRate": 0.05165621790646699, - "likeCount": 2254, - "hahaCount": 445, - "commentCount": 1816, - } - }, - "subscriberData": {"initialCount": 652390, "finalCount": 652390}, - }, - { - "account": { - "id": 816605, - "name": "WND", - "handle": "WNDNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/10616184_978685205477070_7301123703638589430_n.jpg?_nc_cat=110&_nc_oc=AQm5V5YpP7PucYw6lh5UcBTbvWDxAw3jNZpGGnOpem7RUhl7KQuT_0RFS9UItcAmqL8&_nc_ht=scontent.xx&oh=42799b825016837895356c7b53b45526&oe=5E0F6F64", - "subscriberCount": 847147, - "url": "https://www.facebook.com/119984188013847", - "platform": "Facebook", - "platformId": "119984188013847", - "verified": False, - }, - "summary": { - "shareCount": 1442, - "loveCount": 160, - "totalInteractionCount": 7394, - "wowCount": 75, - "sadCount": 65, - "angryCount": 866, - "thankfulCount": 0, - "postCount": 4, - "interactionRate": 0.21813919476182633, - "likeCount": 2703, - "hahaCount": 500, - "commentCount": 1583, - }, - "breakdown": { - "link": { - "shareCount": 1442, - "loveCount": 160, - "totalInteractionCount": 7394, - "wowCount": 75, - "sadCount": 65, - "angryCount": 866, - "thankfulCount": 0, - "postCount": 4, - "interactionRate": 0.21813919476182633, - "likeCount": 2703, - "hahaCount": 500, - "commentCount": 1583, - } - }, - "subscriberData": {"initialCount": 847184, "finalCount": 847147}, - }, - { - "account": { - "id": 19547, - "name": "CNBC", - "handle": "cnbc", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58378702_10157689832569369_7768288312980144128_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkQWczZuEUuUs3yQw-GF-LxMEC4qqezMSlw7v7S2I6ANZbjmX4F6ZJhxCxnIUe_qJY&_nc_ht=scontent.xx&oh=dd93c8b01aefbe85d169013c4e19c5a5&oe=5E13A6DF", - "subscriberCount": 3112193, - "url": "https://www.facebook.com/97212224368", - "platform": "Facebook", - "platformId": "97212224368", - "verified": True, - }, - "summary": { - "loveCount": 219, - "totalInteractionCount": 7091, - "wowCount": 328, - "thankfulCount": 0, - "interactionRate": 0.00629798801775647, - "likeCount": 3504, - "hahaCount": 113, - "commentCount": 535, - "shareCount": 2351, - "sadCount": 22, - "angryCount": 19, - "totalVideoTimeMS": 36841, - "postCount": 36, - }, - "breakdown": { - "native_video": { - "loveCount": 0, - "totalInteractionCount": 26, - "wowCount": 0, - "thankfulCount": 0, - "interactionRate": 0.0008354473901105522, - "likeCount": 17, - "hahaCount": 1, - "commentCount": 3, - "shareCount": 5, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 36841, - "postCount": 1, - }, - "owned_video": { - "loveCount": 0, - "totalInteractionCount": 26, - "wowCount": 0, - "thankfulCount": 0, - "interactionRate": 0.0008354473901105522, - "likeCount": 17, - "hahaCount": 1, - "commentCount": 3, - "shareCount": 5, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 36841, - "postCount": 1, - }, - "link": { - "shareCount": 2346, - "loveCount": 219, - "totalInteractionCount": 7065, - "wowCount": 328, - "sadCount": 22, - "angryCount": 19, - "thankfulCount": 0, - "postCount": 35, - "interactionRate": 0.006458650977393115, - "likeCount": 3487, - "hahaCount": 112, - "commentCount": 532, - }, - }, - "subscriberData": {"initialCount": 3112017, "finalCount": 3112193}, - }, - { - "account": { - "id": 4003, - "name": "Mother Jones", - "handle": "motherjones", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13173930_10153754114402144_7216599223824347020_n.jpg?_nc_cat=1&_nc_oc=AQl6Am3t1g_ne4B7X5hq_O0i1VTjnEJF4vjbAn7XZtJx-l1BO3gpV20BzW2uwlbRLnA&_nc_ht=scontent.xx&oh=365d8d33c49b25f19609c36f1a951716&oe=5E10472B", - "subscriberCount": 1542486, - "url": "https://www.facebook.com/7642602143", - "platform": "Facebook", - "platformId": "7642602143", - "verified": True, - }, - "summary": { - "loveCount": 114, - "totalInteractionCount": 7080, - "wowCount": 391, - "thankfulCount": 0, - "interactionRate": 0.057374466611950226, - "likeCount": 2534, - "hahaCount": 94, - "commentCount": 495, - "shareCount": 1470, - "sadCount": 299, - "angryCount": 1683, - "totalVideoTimeMS": 154259, - "postCount": 8, - }, - "breakdown": { - "native_video": { - "loveCount": 38, - "totalInteractionCount": 313, - "wowCount": 1, - "thankfulCount": 0, - "interactionRate": 0.020291760507955277, - "likeCount": 206, - "hahaCount": 2, - "commentCount": 15, - "shareCount": 51, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 154259, - "postCount": 1, - }, - "owned_video": { - "loveCount": 38, - "totalInteractionCount": 313, - "wowCount": 1, - "thankfulCount": 0, - "interactionRate": 0.020291760507955277, - "likeCount": 206, - "hahaCount": 2, - "commentCount": 15, - "shareCount": 51, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 154259, - "postCount": 1, - }, - "link": { - "shareCount": 1419, - "loveCount": 76, - "totalInteractionCount": 6767, - "wowCount": 390, - "sadCount": 299, - "angryCount": 1683, - "thankfulCount": 0, - "postCount": 7, - "interactionRate": 0.06262568897982364, - "likeCount": 2328, - 
"hahaCount": 92, - "commentCount": 480, - }, - }, - "subscriberData": {"initialCount": 1542510, "finalCount": 1542486}, - }, - { - "account": { - "id": 4004, - "name": "The Last Word With Lawrence O'Donnell", - "handle": "thelastword", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16114622_1184240434964134_5160717321521180833_n.png?_nc_cat=1&_nc_oc=AQkE59Us5gvqt0N90qJZW6XSCRHGK5YGgwcB-G1YctCjO7mmMEWXfrnnaX-jZYV633o&_nc_ht=scontent.xx&oh=eaa0c18d2823fe813960f06f60585643&oe=5E08F8C6", - "subscriberCount": 515865, - "url": "https://www.facebook.com/114945745226947", - "platform": "Facebook", - "platformId": "114945745226947", - "verified": True, - }, - "summary": { - "loveCount": 292, - "totalInteractionCount": 6972, - "wowCount": 346, - "thankfulCount": 0, - "interactionRate": 0.13511286867688252, - "likeCount": 1822, - "hahaCount": 111, - "commentCount": 534, - "shareCount": 1632, - "sadCount": 233, - "angryCount": 2002, - "totalVideoTimeMS": 164000, - "postCount": 10, - }, - "breakdown": { - "native_video": { - "loveCount": 7, - "totalInteractionCount": 1348, - "wowCount": 111, - "thankfulCount": 0, - "interactionRate": 0.26130867571942273, - "likeCount": 268, - "hahaCount": 8, - "commentCount": 136, - "shareCount": 355, - "sadCount": 19, - "angryCount": 444, - "totalVideoTimeMS": 164000, - "postCount": 1, - }, - "owned_video": { - "loveCount": 7, - "totalInteractionCount": 1348, - "wowCount": 111, - "thankfulCount": 0, - "interactionRate": 0.26130867571942273, - "likeCount": 268, - "hahaCount": 8, - "commentCount": 136, - "shareCount": 355, - "sadCount": 19, - "angryCount": 444, - "totalVideoTimeMS": 164000, - "postCount": 1, - }, - "link": { - "shareCount": 1277, - "loveCount": 285, - "totalInteractionCount": 5624, - "wowCount": 235, - "sadCount": 214, - "angryCount": 1558, - "thankfulCount": 0, - "postCount": 9, - "interactionRate": 0.12096187956151319, - "likeCount": 1554, - "hahaCount": 103, - "commentCount": 398, - }, - }, - "subscriberData": {"initialCount": 515865, "finalCount": 515865}, - }, - { - "account": { - "id": 70882, - "name": "Beto O'Rourke", - "handle": "betoorourke", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26047358_1510626285653743_4735771441297721924_n.jpg?_nc_cat=1&_nc_oc=AQmnnmWuN4ETlzHyBrSSQHdk1_CRKiq7bJlyFjoZ-RIKGey-i9IGXLUGIDVsarFy_m0&_nc_ht=scontent.xx&oh=8289827910a7251c17e5ce8b047c6fd9&oe=5E075DA7", - "subscriberCount": 917049, - "url": "https://www.facebook.com/223055747744143", - "platform": "Facebook", - "platformId": "223055747744143", - "verified": True, - }, - "summary": { - "loveCount": 1449, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 6715, - "wowCount": 78, - "thankfulCount": 0, - "interactionRate": 0.1829766306783548, - "likeCount": 3751, - "hahaCount": 54, - "commentCount": 711, - "shareCount": 655, - "sadCount": 7, - "angryCount": 10, - "totalVideoTimeMS": 434812, - "postCount": 4, - }, - "breakdown": { - "native_video": { - "loveCount": 1124, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 4794, - "wowCount": 56, - "thankfulCount": 0, - "interactionRate": 0.17425307260072165, - "likeCount": 2396, - "hahaCount": 43, - "commentCount": 617, - "shareCount": 551, - "sadCount": 2, - "angryCount": 5, - "totalVideoTimeMS": 434812, - "postCount": 3, - }, - "owned_video": { - "loveCount": 1124, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 4794, - "wowCount": 56, - "thankfulCount": 0, - "interactionRate": 0.17425307260072165, - "likeCount": 2396, - "hahaCount": 43, - 
"commentCount": 617, - "shareCount": 551, - "sadCount": 2, - "angryCount": 5, - "totalVideoTimeMS": 434812, - "postCount": 3, - }, - "photo": { - "shareCount": 104, - "loveCount": 325, - "totalInteractionCount": 1921, - "wowCount": 22, - "sadCount": 5, - "angryCount": 5, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.20947443833916538, - "likeCount": 1355, - "hahaCount": 11, - "commentCount": 94, - }, - }, - "subscriberData": {"initialCount": 917065, "finalCount": 917049}, - }, - { - "account": { - "id": 802304, - "name": "LifeSiteNews.com", - "handle": "LifeSiteNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/14045774_10154523214678203_3249696568170902881_n.png?_nc_cat=109&_nc_oc=AQkYiOUyuVFI9ABzfNjSP2Yx1Fi0sRpuYE0U7JBYB2UtkJwykb2_lWjO7RuW_QHhoN8&_nc_ht=scontent.xx&oh=f0d83796cec1106eb43ee96d151e21dd&oe=5E12B767", - "subscriberCount": 199458, - "url": "https://www.facebook.com/112623813202", - "platform": "Facebook", - "platformId": "112623813202", - "verified": False, - }, - "summary": { - "shareCount": 2888, - "loveCount": 58, - "totalInteractionCount": 6534, - "wowCount": 138, - "sadCount": 620, - "angryCount": 1661, - "thankfulCount": 0, - "postCount": 11, - "interactionRate": 0.2978242285525191, - "likeCount": 730, - "hahaCount": 26, - "commentCount": 413, - }, - "breakdown": { - "link": { - "shareCount": 2888, - "loveCount": 58, - "totalInteractionCount": 6534, - "wowCount": 138, - "sadCount": 620, - "angryCount": 1661, - "thankfulCount": 0, - "postCount": 11, - "interactionRate": 0.2978242285525191, - "likeCount": 730, - "hahaCount": 26, - "commentCount": 413, - } - }, - "subscriberData": {"initialCount": 199435, "finalCount": 199458}, - }, - { - "account": { - "id": 1434569, - "name": "Stand Up America", - "handle": "StandUpAmerica", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46836898_641805796289575_5665799734510485504_n.png?_nc_cat=104&_nc_oc=AQnhfgvoFjvWj9dXVn0PbdHHi5khWt7WAJw6v0sooYifJY9u5dJtlNL7DqCIjlReKss&_nc_ht=scontent.xx&oh=4aba644629bd7f2680e9f2fb7ffbd864&oe=5E018897", - "subscriberCount": 1217170, - "url": "https://www.facebook.com/169597416843751", - "platform": "Facebook", - "platformId": "169597416843751", - "verified": True, - }, - "summary": { - "shareCount": 1745, - "loveCount": 4, - "totalInteractionCount": 6327, - "wowCount": 191, - "sadCount": 99, - "angryCount": 3060, - "thankfulCount": 0, - "postCount": 3, - "interactionRate": 0.1732679368052646, - "likeCount": 599, - "hahaCount": 14, - "commentCount": 615, - }, - "breakdown": { - "link": { - "shareCount": 1745, - "loveCount": 4, - "totalInteractionCount": 6327, - "wowCount": 191, - "sadCount": 99, - "angryCount": 3060, - "thankfulCount": 0, - "postCount": 3, - "interactionRate": 0.1732679368052646, - "likeCount": 599, - "hahaCount": 14, - "commentCount": 615, - } - }, - "subscriberData": {"initialCount": 1217210, "finalCount": 1217170}, - }, - { - "account": { - "id": 6631, - "name": "Slate.com", - "handle": "Slate", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26815412_10155867835401438_6786592847511925697_n.jpg?_nc_cat=1&_nc_oc=AQnlPqxpF8HJHZLBBP9M3JCvr7KRojNU13Gek2aIDlLStNh3FwBSADznEiZCEG1_doE&_nc_ht=scontent.xx&oh=fa5bf2320fbcba9484de00ac7f908e6c&oe=5DC8F5CA", - "subscriberCount": 1518896, - "url": "https://www.facebook.com/21516776437", - "platform": "Facebook", - "platformId": "21516776437", - "verified": True, - }, - "summary": { - "shareCount": 1119, - "loveCount": 44, - "totalInteractionCount": 6254, - "wowCount": 
231, - "sadCount": 188, - "angryCount": 2143, - "thankfulCount": 0, - "postCount": 27, - "interactionRate": 0.01520832441791949, - "likeCount": 1175, - "hahaCount": 410, - "commentCount": 944, - }, - "breakdown": { - "link": { - "shareCount": 1119, - "loveCount": 44, - "totalInteractionCount": 6254, - "wowCount": 231, - "sadCount": 188, - "angryCount": 2143, - "thankfulCount": 0, - "postCount": 27, - "interactionRate": 0.01520832441791949, - "likeCount": 1175, - "hahaCount": 410, - "commentCount": 944, - } - }, - "subscriberData": {"initialCount": 1518914, "finalCount": 1518896}, - }, - { - "account": { - "id": 10334, - "name": "Forbes", - "handle": "forbes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/50813627_10157101876612509_8264332807757627392_n.jpg?_nc_cat=1&_nc_oc=AQnAYl9Z16OLHbDWE7a-lxG8OIKUVGaLmLLun6kzNd5lYoeiHsDE7pDBktqBmspzHnQ&_nc_ht=scontent.xx&oh=90b9d2de38f9b99c2b632764adb156c1&oe=5E020D31", - "subscriberCount": 5732199, - "url": "https://www.facebook.com/30911162508", - "platform": "Facebook", - "platformId": "30911162508", - "verified": True, - }, - "summary": { - "loveCount": 265, - "totalInteractionCount": 6197, - "wowCount": 185, - "thankfulCount": 0, - "interactionRate": 0.002076058587071162, - "likeCount": 3423, - "hahaCount": 248, - "commentCount": 691, - "shareCount": 1240, - "sadCount": 57, - "angryCount": 88, - "totalVideoTimeMS": 179004, - "postCount": 52, - }, - "breakdown": { - "native_video": { - "loveCount": 4, - "totalInteractionCount": 41, - "wowCount": 1, - "thankfulCount": 0, - "interactionRate": 0.00034891740959179193, - "likeCount": 22, - "hahaCount": 0, - "commentCount": 3, - "shareCount": 11, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 179004, - "postCount": 2, - }, - "owned_video": { - "loveCount": 4, - "totalInteractionCount": 41, - "wowCount": 1, - "thankfulCount": 0, - "interactionRate": 0.00034891740959179193, - "likeCount": 22, - "hahaCount": 0, - "commentCount": 3, - "shareCount": 11, - "sadCount": 0, - "angryCount": 0, - "totalVideoTimeMS": 179004, - "postCount": 2, - }, - "link": { - "shareCount": 1059, - "loveCount": 206, - "totalInteractionCount": 5269, - "wowCount": 179, - "sadCount": 56, - "angryCount": 88, - "thankfulCount": 0, - "postCount": 48, - "interactionRate": 0.001901599882275266, - "likeCount": 2759, - "hahaCount": 247, - "commentCount": 675, - }, - "photo": { - "shareCount": 170, - "loveCount": 55, - "totalInteractionCount": 887, - "wowCount": 5, - "sadCount": 1, - "angryCount": 0, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.007728520622458192, - "likeCount": 642, - "hahaCount": 1, - "commentCount": 13, - }, - }, - "subscriberData": {"initialCount": 5731832, "finalCount": 5732199}, - }, - { - "account": { - "id": 10335, - "name": "The Wall Street Journal", - "handle": "wsj", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26734229_10157192613173128_6286097899182572387_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkg3dR3V2rO72fdcQNc6Kdupv3fYH3-VXio9SvAwKULEi36QT0vhIKN0_FvohpQCGs&_nc_ht=scontent.xx&oh=f550584e1e7adab86d889e32b7468801&oe=5DFE7FE9", - "subscriberCount": 6360356, - "url": "https://www.facebook.com/8304333127", - "platform": "Facebook", - "platformId": "8304333127", - "verified": True, - }, - "summary": { - "shareCount": 636, - "loveCount": 159, - "totalInteractionCount": 6154, - "wowCount": 194, - "sadCount": 373, - "angryCount": 61, - "thankfulCount": 0, - "postCount": 39, - "interactionRate": 0.002468462250215597, - "likeCount": 2807, - "hahaCount": 
621, - "commentCount": 1303, - }, - "breakdown": { - "link": { - "shareCount": 636, - "loveCount": 159, - "totalInteractionCount": 6154, - "wowCount": 194, - "sadCount": 373, - "angryCount": 61, - "thankfulCount": 0, - "postCount": 39, - "interactionRate": 0.002468462250215597, - "likeCount": 2807, - "hahaCount": 621, - "commentCount": 1303, - } - }, - "subscriberData": {"initialCount": 6360114, "finalCount": 6360356}, - }, - { - "account": { - "id": 18756, - "name": "BuzzFeed News", - "handle": "BuzzFeedNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/37324661_1987747984579543_6544772647132069888_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQl4xuZMtXJ6qFqyRhwKzfdvsAYA1JGI1ajz4X8q4bIHiObnrMGyXWEFiDcVxaVlrgM&_nc_ht=scontent.xx&oh=3a3c2ae104e50e8860b8dcf413215500&oe=5DFB7022", - "subscriberCount": 3017031, - "url": "https://www.facebook.com/618786471475708", - "platform": "Facebook", - "platformId": "618786471475708", - "verified": True, - }, - "summary": { - "loveCount": 495, - "totalInteractionCount": 6041, - "wowCount": 145, - "thankfulCount": 0, - "interactionRate": 0.011106491694664973, - "likeCount": 2448, - "hahaCount": 567, - "commentCount": 881, - "shareCount": 880, - "sadCount": 156, - "angryCount": 469, - "totalVideoTimeMS": 230968, - "postCount": 18, - }, - "breakdown": { - "native_video": { - "loveCount": 310, - "totalInteractionCount": 3054, - "wowCount": 45, - "thankfulCount": 0, - "interactionRate": 0.03375047326916102, - "likeCount": 1169, - "hahaCount": 437, - "commentCount": 587, - "shareCount": 503, - "sadCount": 3, - "angryCount": 0, - "totalVideoTimeMS": 164111, - "postCount": 3, - }, - "owned_video": { - "loveCount": 310, - "totalInteractionCount": 3118, - "wowCount": 46, - "thankfulCount": 0, - "interactionRate": 0.025826737403414964, - "likeCount": 1185, - "hahaCount": 438, - "commentCount": 591, - "shareCount": 517, - "sadCount": 27, - "angryCount": 4, - "totalVideoTimeMS": 230968, - "postCount": 4, - }, - "crosspost": { - "loveCount": 0, - "totalInteractionCount": 64, - "wowCount": 1, - "thankfulCount": 0, - "interactionRate": 0.0021218372192792784, - "likeCount": 16, - "hahaCount": 1, - "commentCount": 4, - "shareCount": 14, - "sadCount": 24, - "angryCount": 4, - "totalVideoTimeMS": 66857, - "postCount": 1, - }, - "link": { - "shareCount": 363, - "loveCount": 185, - "totalInteractionCount": 2923, - "wowCount": 99, - "sadCount": 129, - "angryCount": 465, - "thankfulCount": 0, - "postCount": 14, - "interactionRate": 0.006895970962657654, - "likeCount": 1263, - "hahaCount": 129, - "commentCount": 290, - }, - }, - "subscriberData": {"initialCount": 3015477, "finalCount": 3017031}, - }, - { - "account": { - "id": 7781, - "name": "The Daily Beast", - "handle": "thedailybeast", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18447180_10155420999849203_1942956350622474660_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlsvWaYHxyRC2B3NwwmVoV1kpqGNvYkkSxSr_lFopmdwhj-uerxTWu7CmbWz-8Qq-Q&_nc_ht=scontent.xx&oh=86caf840e49b739e6381c591317aab4b&oe=5DC85150", - "subscriberCount": 2163118, - "url": "https://www.facebook.com/37763684202", - "platform": "Facebook", - "platformId": "37763684202", - "verified": True, - }, - "summary": { - "shareCount": 1041, - "loveCount": 103, - "totalInteractionCount": 6002, - "wowCount": 358, - "sadCount": 205, - "angryCount": 1163, - "thankfulCount": 0, - "postCount": 22, - "interactionRate": 0.012574188288761612, - "likeCount": 1906, - "hahaCount": 338, - "commentCount": 888, - }, - "breakdown": { - "link": { - "shareCount": 
1041, - "loveCount": 103, - "totalInteractionCount": 6002, - "wowCount": 358, - "sadCount": 205, - "angryCount": 1163, - "thankfulCount": 0, - "postCount": 22, - "interactionRate": 0.012574188288761612, - "likeCount": 1906, - "hahaCount": 338, - "commentCount": 888, - } - }, - "subscriberData": {"initialCount": 2163205, "finalCount": 2163118}, - }, - { - "account": { - "id": 38713, - "name": "C-SPAN", - "handle": "CSPAN", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/55510789_10157680504275579_8831454901000208384_n.jpg?_nc_cat=1&_nc_oc=AQmjwnGr1J3r3CGT1XgTUKOfeg_fD4sEbdS9k1LsQLmt6p7MuMZttOOafaHCnbTbRLM&_nc_ht=scontent.xx&oh=6cbbd30189f614a83fd0cd8c71f48f2a&oe=5DF5A55A", - "subscriberCount": 1225429, - "url": "https://www.facebook.com/21472760578", - "platform": "Facebook", - "platformId": "21472760578", - "verified": True, - }, - "summary": { - "shareCount": 128, - "loveCount": 59, - "totalInteractionCount": 5649, - "wowCount": 10, - "sadCount": 2, - "angryCount": 3, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.4609752126879572, - "likeCount": 456, - "hahaCount": 104, - "commentCount": 4887, - }, - "breakdown": { - "status": { - "shareCount": 128, - "loveCount": 59, - "totalInteractionCount": 5649, - "wowCount": 10, - "sadCount": 2, - "angryCount": 3, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.4609752126879572, - "likeCount": 456, - "hahaCount": 104, - "commentCount": 4887, - } - }, - "subscriberData": {"initialCount": 1225462, "finalCount": 1225429}, - }, - { - "account": { - "id": 40861, - "name": "AM Joy on MSNBC", - "handle": "amjoyshow", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16649094_1410994648972224_5741405830257531775_n.jpg?_nc_cat=1&_nc_oc=AQmHBK8cydrfov5x4YKdJPorNZH_m9SRBtjQ0frg7JykmFvhVgN67G0HMg73UoT_mto&_nc_ht=scontent.xx&oh=5eb86ad6a838433c9a88a570da2d9885&oe=5E0A5DB1", - "subscriberCount": 337795, - "url": "https://www.facebook.com/598356233569407", - "platform": "Facebook", - "platformId": "598356233569407", - "verified": True, - }, - "summary": { - "loveCount": 548, - "totalInteractionCount": 5198, - "wowCount": 93, - "thankfulCount": 0, - "interactionRate": 0.15364985005077253, - "likeCount": 1681, - "hahaCount": 28, - "commentCount": 1488, - "shareCount": 536, - "sadCount": 46, - "angryCount": 778, - "totalVideoTimeMS": 55935, - "postCount": 10, - }, - "breakdown": { - "native_video": { - "loveCount": 430, - "totalInteractionCount": 2407, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.35614791832577913, - "likeCount": 650, - "hahaCount": 19, - "commentCount": 1121, - "shareCount": 179, - "sadCount": 1, - "angryCount": 3, - "totalVideoTimeMS": 55935, - "postCount": 2, - }, - "owned_video": { - "loveCount": 430, - "totalInteractionCount": 2407, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.35614791832577913, - "likeCount": 650, - "hahaCount": 19, - "commentCount": 1121, - "shareCount": 179, - "sadCount": 1, - "angryCount": 3, - "totalVideoTimeMS": 55935, - "postCount": 2, - }, - "link": { - "shareCount": 342, - "loveCount": 16, - "totalInteractionCount": 2003, - "wowCount": 89, - "sadCount": 45, - "angryCount": 775, - "thankfulCount": 0, - "postCount": 6, - "interactionRate": 0.09858458587072688, - "likeCount": 414, - "hahaCount": 8, - "commentCount": 314, - }, - "photo": { - "shareCount": 15, - "loveCount": 102, - "totalInteractionCount": 788, - "wowCount": 0, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 
0.1166436241233225, - "likeCount": 617, - "hahaCount": 1, - "commentCount": 53, - }, - }, - "subscriberData": {"initialCount": 337767, "finalCount": 337795}, - }, - { - "account": { - "id": 8830, - "name": "Team Coco", - "handle": "teamcoco", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27545533_1749701865088688_7429648788456974231_n.png?_nc_cat=1&_nc_oc=AQmIkkiMMABDhk0_zoAtgXNkZTDYcJ8uW8Ig76pvppQhEs1OyF4zSAtAZzFS14OC94g&_nc_ht=scontent.xx&oh=f04325a386158ffaa5df7dafbfea7d7b&oe=5E014FF4", - "subscriberCount": 3980440, - "url": "https://www.facebook.com/108905269168364", - "platform": "Facebook", - "platformId": "108905269168364", - "verified": True, - }, - "summary": { - "loveCount": 239, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 5162, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.04321130327300499, - "likeCount": 2364, - "hahaCount": 1573, - "commentCount": 200, - "shareCount": 778, - "sadCount": 4, - "angryCount": 0, - "totalVideoTimeMS": 784551, - "postCount": 3, - }, - "breakdown": { - "owned_video": { - "loveCount": 221, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 4937, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.06200319562661414, - "likeCount": 2188, - "hahaCount": 1563, - "commentCount": 186, - "shareCount": 771, - "sadCount": 4, - "angryCount": 0, - "totalVideoTimeMS": 784551, - "postCount": 2, - }, - "crosspost": { - "loveCount": 221, - "threePlusMinuteVideoCount": 2, - "totalInteractionCount": 4937, - "wowCount": 4, - "thankfulCount": 0, - "interactionRate": 0.06200319562661414, - "likeCount": 2188, - "hahaCount": 1563, - "commentCount": 186, - "shareCount": 771, - "sadCount": 4, - "angryCount": 0, - "totalVideoTimeMS": 784551, - "postCount": 2, - }, - "link": { - "shareCount": 7, - "loveCount": 18, - "totalInteractionCount": 225, - "wowCount": 0, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.005652641416526816, - "likeCount": 176, - "hahaCount": 10, - "commentCount": 14, - }, - }, - "subscriberData": {"initialCount": 3980440, "finalCount": 3980440}, - }, - { - "account": { - "id": 10336, - "name": "Los Angeles Times", - "handle": "latimes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/42232523_10156934290463010_1867037960201830400_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkCUNuoErCZDtJ7bdvHC31FqbecsabwlNKsH2wceWr2Jv2z66F-V6pFJXzp8Z9dp5o&_nc_ht=scontent.xx&oh=9ac9785c391e9dbbdff486ee74ea0ac5&oe=5DF49524", - "subscriberCount": 2755671, - "url": "https://www.facebook.com/5863113009", - "platform": "Facebook", - "platformId": "5863113009", - "verified": True, - }, - "summary": { - "loveCount": 101, - "totalInteractionCount": 4506, - "wowCount": 223, - "thankfulCount": 0, - "interactionRate": 0.003520024037772398, - "likeCount": 1519, - "hahaCount": 235, - "commentCount": 753, - "shareCount": 674, - "sadCount": 511, - "angryCount": 490, - "totalVideoTimeMS": 509063, - "postCount": 46, - }, - "breakdown": { - "native_video": { - "loveCount": 17, - "totalInteractionCount": 243, - "wowCount": 7, - "thankfulCount": 0, - "interactionRate": 0.0012338228586006345, - "likeCount": 124, - "hahaCount": 5, - "commentCount": 39, - "shareCount": 24, - "sadCount": 27, - "angryCount": 0, - "totalVideoTimeMS": 509063, - "postCount": 7, - }, - "owned_video": { - "loveCount": 17, - "totalInteractionCount": 243, - "wowCount": 7, - "thankfulCount": 0, - "interactionRate": 0.0012338228586006345, - "likeCount": 124, - "hahaCount": 5, - 
"commentCount": 39, - "shareCount": 24, - "sadCount": 27, - "angryCount": 0, - "totalVideoTimeMS": 509063, - "postCount": 7, - }, - "link": { - "shareCount": 647, - "loveCount": 83, - "totalInteractionCount": 4246, - "wowCount": 216, - "sadCount": 484, - "angryCount": 490, - "thankfulCount": 0, - "postCount": 38, - "interactionRate": 0.004028068744255013, - "likeCount": 1383, - "hahaCount": 230, - "commentCount": 713, - }, - "photo": { - "shareCount": 3, - "loveCount": 1, - "totalInteractionCount": 17, - "wowCount": 0, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.0006169114293003172, - "likeCount": 12, - "hahaCount": 0, - "commentCount": 1, - }, - }, - "subscriberData": {"initialCount": 2755655, "finalCount": 2755671}, - }, - { - "account": { - "id": 115499, - "name": "Todd Starnes", - "handle": "ToddStarnesFNC", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46920198_2335144319893720_2455881966508048384_n.jpg?_nc_cat=109&_nc_oc=AQlVsotrCFkbQxUVhqk_a20GnVwI4eyMwE1FyELHA5EaL9-KdatZ3bMmtSFKeyQE0jk&_nc_ht=scontent.xx&oh=ece111e79c6914b50e86197d088ecff7&oe=5DFD2E1C", - "subscriberCount": 265905, - "url": "https://www.facebook.com/128334087241432", - "platform": "Facebook", - "platformId": "128334087241432", - "verified": True, - }, - "summary": { - "shareCount": 998, - "loveCount": 250, - "totalInteractionCount": 4169, - "wowCount": 46, - "sadCount": 2, - "angryCount": 67, - "thankfulCount": 0, - "postCount": 4, - "interactionRate": 0.3918854882020655, - "likeCount": 2412, - "hahaCount": 84, - "commentCount": 310, - }, - "breakdown": { - "link": { - "shareCount": 998, - "loveCount": 250, - "totalInteractionCount": 4169, - "wowCount": 46, - "sadCount": 2, - "angryCount": 67, - "thankfulCount": 0, - "postCount": 4, - "interactionRate": 0.3918854882020655, - "likeCount": 2412, - "hahaCount": 84, - "commentCount": 310, - } - }, - "subscriberData": {"initialCount": 265883, "finalCount": 265905}, - }, - { - "account": { - "id": 6895, - "name": "Hot Air", - "handle": "hotaircom", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1918240_103971746305177_1971637_n.jpg?_nc_cat=105&_nc_oc=AQmq3wr_U32ihNNbx8lpwEGbZjWoKbczxWgDl2LGkBuBl9j9-tyWq8Sc1J2iHA8jRWY&_nc_ht=scontent.xx&oh=86e1f495f452096fe58cf9743e825f16&oe=5E0A5E0D", - "subscriberCount": 785694, - "url": "https://www.facebook.com/103971336305218", - "platform": "Facebook", - "platformId": "103971336305218", - "verified": True, - }, - "summary": { - "shareCount": 599, - "loveCount": 19, - "totalInteractionCount": 4066, - "wowCount": 39, - "sadCount": 81, - "angryCount": 765, - "thankfulCount": 0, - "postCount": 17, - "interactionRate": 0.03041811545138447, - "likeCount": 508, - "hahaCount": 583, - "commentCount": 1472, - }, - "breakdown": { - "link": { - "shareCount": 599, - "loveCount": 19, - "totalInteractionCount": 4066, - "wowCount": 39, - "sadCount": 81, - "angryCount": 765, - "thankfulCount": 0, - "postCount": 17, - "interactionRate": 0.03041811545138447, - "likeCount": 508, - "hahaCount": 583, - "commentCount": 1472, - } - }, - "subscriberData": {"initialCount": 785738, "finalCount": 785694}, - }, - { - "account": { - "id": 8015, - "name": "ACLU", - "handle": "aclu", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22089288_10154767565091813_1188859475219690990_n.png?_nc_cat=1&_nc_oc=AQlOc1ngKYC3GfwIgJztREf3PH5Hd4YwSsZ4g2b5SwQaWmUWfBbb44yyZco-2DLasTQ&_nc_ht=scontent.xx&oh=a2e38ee9d88c35289b7dc25ecab038b1&oe=5E0C98FE", - 
"subscriberCount": 2373681, - "url": "https://www.facebook.com/18982436812", - "platform": "Facebook", - "platformId": "18982436812", - "verified": True, - }, - "summary": { - "shareCount": 577, - "loveCount": 6, - "totalInteractionCount": 4012, - "wowCount": 158, - "sadCount": 495, - "angryCount": 1855, - "thankfulCount": 0, - "postCount": 3, - "interactionRate": 0.05632528293238606, - "likeCount": 765, - "hahaCount": 11, - "commentCount": 145, - }, - "breakdown": { - "link": { - "shareCount": 434, - "loveCount": 6, - "totalInteractionCount": 2894, - "wowCount": 140, - "sadCount": 385, - "angryCount": 1140, - "thankfulCount": 0, - "postCount": 2, - "interactionRate": 0.060959375021064056, - "likeCount": 681, - "hahaCount": 9, - "commentCount": 99, - }, - "photo": { - "shareCount": 143, - "loveCount": 0, - "totalInteractionCount": 1118, - "wowCount": 18, - "sadCount": 110, - "angryCount": 715, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.04709922686492717, - "likeCount": 84, - "hahaCount": 2, - "commentCount": 46, - }, - }, - "subscriberData": {"initialCount": 2373743, "finalCount": 2373681}, - }, - { - "account": { - "id": 48821, - "name": "Crooks and Liars", - "handle": "crooksandliars.site", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/399460_10150942179577183_1849263528_n.jpg?_nc_cat=103&_nc_oc=AQnr-v08LlItth11GcPpklOwQqPVr0pcraHt2N9z8qAYugvNo-dOrF98XGKQSUDfBqA&_nc_ht=scontent.xx&oh=72192ff8f806d8bc882b8fd212075eb4&oe=5DFA36B2", - "subscriberCount": 137575, - "url": "https://www.facebook.com/33455892182", - "platform": "Facebook", - "platformId": "33455892182", - "verified": False, - }, - "summary": { - "shareCount": 1433, - "loveCount": 39, - "totalInteractionCount": 3697, - "wowCount": 82, - "sadCount": 38, - "angryCount": 833, - "thankfulCount": 0, - "postCount": 14, - "interactionRate": 0.19191416202934689, - "likeCount": 647, - "hahaCount": 240, - "commentCount": 385, - }, - "breakdown": { - "link": { - "shareCount": 1433, - "loveCount": 39, - "totalInteractionCount": 3697, - "wowCount": 82, - "sadCount": 38, - "angryCount": 833, - "thankfulCount": 0, - "postCount": 14, - "interactionRate": 0.19191416202934689, - "likeCount": 647, - "hahaCount": 240, - "commentCount": 385, - } - }, - "subscriberData": {"initialCount": 137548, "finalCount": 137575}, - }, - { - "account": { - "id": 18808, - "name": "All In with Chris Hayes", - "handle": "allinwithchris", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10547485_345099262321772_6121447368826167660_n.jpg?_nc_cat=104&_nc_oc=AQlozD0cFDb_FbzaOqCikk-LgnOKbPkWtpTgtdkKGtLsFEVkknXg-KX1hj_61rRWnoo&_nc_ht=scontent.xx&oh=28aa3954be984198f12f1c3c1959f089&oe=5E047CA4", - "subscriberCount": 318658, - "url": "https://www.facebook.com/153005644864469", - "platform": "Facebook", - "platformId": "153005644864469", - "verified": True, - }, - "summary": { - "shareCount": 362, - "loveCount": 231, - "totalInteractionCount": 3399, - "wowCount": 66, - "sadCount": 37, - "angryCount": 174, - "thankfulCount": 0, - "postCount": 11, - "interactionRate": 0.09697996845797142, - "likeCount": 1609, - "hahaCount": 241, - "commentCount": 679, - }, - "breakdown": { - "link": { - "shareCount": 362, - "loveCount": 231, - "totalInteractionCount": 3399, - "wowCount": 66, - "sadCount": 37, - "angryCount": 174, - "thankfulCount": 0, - "postCount": 11, - "interactionRate": 0.09697996845797142, - "likeCount": 1609, - "hahaCount": 241, - "commentCount": 679, - } - }, - "subscriberData": {"initialCount": 318587, "finalCount": 
318658}, - }, - { - "account": { - "id": 6803, - "name": "ATTN:", - "handle": "attn", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/55489107_2197559426946171_5392793883551727616_n.png?_nc_cat=1&_nc_oc=AQlFVtxJFIGlgUkYcw60Zkg1TJJE0DfLcxAkL46yxLXEzD3fwboZzZWSZrZb7ZRK1A0&_nc_ht=scontent.xx&oh=b8584181fa979fe9be2ba7ccdf83ee91&oe=5DF5C3E9", - "subscriberCount": 6115190, - "url": "https://www.facebook.com/160389977329803", - "platform": "Facebook", - "platformId": "160389977329803", - "verified": True, - }, - "summary": { - "loveCount": 234, - "totalInteractionCount": 3344, - "wowCount": 43, - "thankfulCount": 0, - "interactionRate": 0.013670719942465237, - "likeCount": 1581, - "hahaCount": 77, - "commentCount": 406, - "shareCount": 906, - "sadCount": 49, - "angryCount": 48, - "totalVideoTimeMS": 465869, - "postCount": 4, - }, - "breakdown": { - "share": { - "loveCount": 234, - "totalInteractionCount": 3344, - "wowCount": 43, - "thankfulCount": 0, - "interactionRate": 0.013670719942465237, - "likeCount": 1581, - "hahaCount": 77, - "commentCount": 406, - "shareCount": 906, - "sadCount": 49, - "angryCount": 48, - "totalVideoTimeMS": 465869, - "postCount": 4, - } - }, - "subscriberData": {"initialCount": 6115329, "finalCount": 6115190}, - }, - { - "account": { - "id": 10870, - "name": "Ron Wyden", - "handle": "wyden", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12190982_10153365721817858_6948350790870886697_n.jpg?_nc_cat=100&_nc_oc=AQlSBDMPg1ns4cB8WiToj9dg65G_2i3lMHXfEYQsHz1nRK02sr0h5MYdH7To6P7NX90&_nc_ht=scontent.xx&oh=3cd24adf4b8435744c4d1a54cfb318df&oe=5DF9B0D9", - "subscriberCount": 285891, - "url": "https://www.facebook.com/54787697857", - "platform": "Facebook", - "platformId": "54787697857", - "verified": True, - }, - "summary": { - "shareCount": 374, - "loveCount": 108, - "totalInteractionCount": 3320, - "wowCount": 12, - "sadCount": 14, - "angryCount": 192, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 1.1612533141190216, - "likeCount": 2267, - "hahaCount": 15, - "commentCount": 338, - }, - "breakdown": { - "status": { - "shareCount": 374, - "loveCount": 108, - "totalInteractionCount": 3320, - "wowCount": 12, - "sadCount": 14, - "angryCount": 192, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 1.1612533141190216, - "likeCount": 2267, - "hahaCount": 15, - "commentCount": 338, - } - }, - "subscriberData": {"initialCount": 285905, "finalCount": 285891}, - }, - { - "account": { - "id": 7201, - "name": "Reason Magazine", - "handle": "Reason.Magazine", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/25507745_10155330003029117_4400239766346429931_n.png?_nc_cat=1&_nc_oc=AQnagqHyUqN5W0ZyyHCUfDHPUMXM_4ac8YRBQE669TgxJkv1DFyTfv73gsTrX6mUrgY&_nc_ht=scontent.xx&oh=6cbfe90b09ec3e8192ff64359dc59e20&oe=5E023167", - "subscriberCount": 469507, - "url": "https://www.facebook.com/17548474116", - "platform": "Facebook", - "platformId": "17548474116", - "verified": True, - }, - "summary": { - "loveCount": 87, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 3268, - "wowCount": 23, - "thankfulCount": 0, - "interactionRate": 0.057934836089142974, - "likeCount": 1670, - "hahaCount": 156, - "commentCount": 222, - "shareCount": 1025, - "sadCount": 49, - "angryCount": 36, - "totalVideoTimeMS": 2874551, - "postCount": 12, - }, - "breakdown": { - "owned_video": { - "loveCount": 43, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 1015, - "wowCount": 6, - "thankfulCount": 0, - "interactionRate": 
0.053887917391739606, - "likeCount": 475, - "hahaCount": 71, - "commentCount": 62, - "shareCount": 346, - "sadCount": 7, - "angryCount": 5, - "totalVideoTimeMS": 2874551, - "postCount": 4, - }, - "crosspost": { - "loveCount": 43, - "threePlusMinuteVideoCount": 3, - "totalInteractionCount": 1015, - "wowCount": 6, - "thankfulCount": 0, - "interactionRate": 0.053887917391739606, - "likeCount": 475, - "hahaCount": 71, - "commentCount": 62, - "shareCount": 346, - "sadCount": 7, - "angryCount": 5, - "totalVideoTimeMS": 2874551, - "postCount": 4, - }, - "link": { - "shareCount": 679, - "loveCount": 44, - "totalInteractionCount": 2253, - "wowCount": 17, - "sadCount": 42, - "angryCount": 31, - "thankfulCount": 0, - "postCount": 8, - "interactionRate": 0.059851797577386665, - "likeCount": 1195, - "hahaCount": 85, - "commentCount": 160, - }, - }, - "subscriberData": {"initialCount": 469479, "finalCount": 469507}, - }, - { - "account": { - "id": 17948, - "name": "The Tonight Show Starring Jimmy Fallon", - "handle": "FallonTonight", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/40960938_10156796565408896_7404903529636167680_n.jpg?_nc_cat=1&_nc_oc=AQnoOgepqCwTPL1-ppqLUJBEQ7SLONcvBAudjquT4-cSinjzmGe4Nh8daL32nyC7tkA&_nc_ht=scontent.xx&oh=61dd49b423492addb96c27c27f7ba2eb&oe=5E0F8A8D", - "subscriberCount": 14248140, - "url": "https://www.facebook.com/31732483895", - "platform": "Facebook", - "platformId": "31732483895", - "verified": True, - }, - "summary": { - "loveCount": 248, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 3200, - "wowCount": 186, - "thankfulCount": 0, - "interactionRate": 0.011229603311806535, - "likeCount": 2038, - "hahaCount": 242, - "commentCount": 138, - "shareCount": 338, - "sadCount": 4, - "angryCount": 6, - "totalVideoTimeMS": 693138, - "postCount": 2, - }, - "breakdown": { - "native_video": { - "loveCount": 38, - "totalInteractionCount": 537, - "wowCount": 3, - "thankfulCount": 0, - "interactionRate": 0.003768935611525068, - "likeCount": 283, - "hahaCount": 123, - "commentCount": 53, - "shareCount": 30, - "sadCount": 3, - "angryCount": 4, - "totalVideoTimeMS": 125459, - "postCount": 1, - }, - "owned_video": { - "loveCount": 248, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 3200, - "wowCount": 186, - "thankfulCount": 0, - "interactionRate": 0.011229603311806535, - "likeCount": 2038, - "hahaCount": 242, - "commentCount": 138, - "shareCount": 338, - "sadCount": 4, - "angryCount": 6, - "totalVideoTimeMS": 693138, - "postCount": 2, - }, - "crosspost": { - "loveCount": 210, - "threePlusMinuteVideoCount": 1, - "totalInteractionCount": 2663, - "wowCount": 183, - "thankfulCount": 0, - "interactionRate": 0.018690271012088002, - "likeCount": 1755, - "hahaCount": 119, - "commentCount": 85, - "shareCount": 308, - "sadCount": 1, - "angryCount": 2, - "totalVideoTimeMS": 567679, - "postCount": 1, - }, - }, - "subscriberData": {"initialCount": 14247969, "finalCount": 14248140}, - }, - { - "account": { - "id": 46610, - "name": "Chris Murphy", - "handle": "ChrisMurphyCT", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16508448_10154689090443961_3586091731088933149_n.jpg?_nc_cat=105&_nc_oc=AQlvuUbIcdD9Dkn7lKNRqcM8WBn2ExJPinOnC5BOuFt2fYoeJ29C3gMzj5V7Var7l64&_nc_ht=scontent.xx&oh=32d0568a8ff3afbe50618dcda76dd79e&oe=5E01E210", - "subscriberCount": 252316, - "url": "https://www.facebook.com/19437978960", - "platform": "Facebook", - "platformId": "19437978960", - "verified": True, - }, - "summary": { - "shareCount": 419, - 
"loveCount": 6, - "totalInteractionCount": 3133, - "wowCount": 26, - "sadCount": 1243, - "angryCount": 286, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 1.2416796957825298, - "likeCount": 908, - "hahaCount": 24, - "commentCount": 221, - }, - "breakdown": { - "photo": { - "shareCount": 419, - "loveCount": 6, - "totalInteractionCount": 3133, - "wowCount": 26, - "sadCount": 1243, - "angryCount": 286, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 1.2416796957825298, - "likeCount": 908, - "hahaCount": 24, - "commentCount": 221, - } - }, - "subscriberData": {"initialCount": 252323, "finalCount": 252316}, - }, - { - "account": { - "id": 370587, - "name": "Tea Party", - "handle": "teapartyorg", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10645152_1119529788064461_6831324369519464936_n.png?_nc_cat=109&_nc_oc=AQlYPwkxXVKsTPXKN2iEw6-kekm3w1t-TNKlGRez6lg5WNmUCadSHtPr1aKi7-vMXx0&_nc_ht=scontent.xx&oh=47cea2dd8d0821de871a1f427d4cc9c3&oe=5E051C4C", - "subscriberCount": 416797, - "url": "https://www.facebook.com/172526489431467", - "platform": "Facebook", - "platformId": "172526489431467", - "verified": True, - }, - "summary": { - "shareCount": 409, - "loveCount": 35, - "totalInteractionCount": 3120, - "wowCount": 67, - "sadCount": 29, - "angryCount": 480, - "thankfulCount": 0, - "postCount": 39, - "interactionRate": 0.01919339747126988, - "likeCount": 520, - "hahaCount": 495, - "commentCount": 1085, - }, - "breakdown": { - "link": { - "shareCount": 409, - "loveCount": 35, - "totalInteractionCount": 3120, - "wowCount": 67, - "sadCount": 29, - "angryCount": 480, - "thankfulCount": 0, - "postCount": 39, - "interactionRate": 0.01919339747126988, - "likeCount": 520, - "hahaCount": 495, - "commentCount": 1085, - } - }, - "subscriberData": {"initialCount": 416823, "finalCount": 416797}, - }, - { - "account": { - "id": 4007, - "name": "Truthout", - "handle": "truthout", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/19894613_10154795655481094_2383393652303893841_n.jpg?_nc_cat=110&_nc_oc=AQkTbiRpAD3hZBOdyzgT1PgAhh4VQwvgi7_UrWwWRSAE_kE9X6Vo3lxn9jEYjOQ71yY&_nc_ht=scontent.xx&oh=a8046506973cb0fdb4deab6119ed03f5&oe=5DF988DD", - "subscriberCount": 754194, - "url": "https://www.facebook.com/83865976093", - "platform": "Facebook", - "platformId": "83865976093", - "verified": True, - }, - "summary": { - "shareCount": 919, - "loveCount": 100, - "totalInteractionCount": 3082, - "wowCount": 97, - "sadCount": 58, - "angryCount": 644, - "thankfulCount": 0, - "postCount": 7, - "interactionRate": 0.058339614786175366, - "likeCount": 980, - "hahaCount": 86, - "commentCount": 198, - }, - "breakdown": { - "link": { - "shareCount": 919, - "loveCount": 100, - "totalInteractionCount": 3082, - "wowCount": 97, - "sadCount": 58, - "angryCount": 644, - "thankfulCount": 0, - "postCount": 7, - "interactionRate": 0.058339614786175366, - "likeCount": 980, - "hahaCount": 86, - "commentCount": 198, - } - }, - "subscriberData": {"initialCount": 754215, "finalCount": 754194}, - }, - { - "account": { - "id": 10662, - "name": "Senator Chuck Schumer", - "handle": "senschumer", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c53.0.200.200a/p200x200/484164_10151474167894407_1415959450_n.png?_nc_cat=110&_nc_oc=AQmwM-QX_xbE0MV5qDszLzLb9AfcHcmbiFLZejE1imfSOMx9OawOnk_4NRNjQN6BQko&_nc_ht=scontent.xx&oh=b4eb0adf1c1c4d46d1933f6f4a5ffd87&oe=5DFB0E16", - "subscriberCount": 376381, - "url": "https://www.facebook.com/15771239406", - "platform": "Facebook", - "platformId": 
"15771239406", - "verified": True, - }, - "summary": { - "loveCount": 12, - "totalInteractionCount": 3032, - "wowCount": 61, - "thankfulCount": 0, - "interactionRate": 0.40277853424215326, - "likeCount": 712, - "hahaCount": 364, - "commentCount": 805, - "shareCount": 579, - "sadCount": 37, - "angryCount": 462, - "totalVideoTimeMS": 122878, - "postCount": 2, - }, - "breakdown": { - "native_video": { - "loveCount": 5, - "totalInteractionCount": 1405, - "wowCount": 17, - "thankfulCount": 0, - "interactionRate": 0.3732874938062173, - "likeCount": 300, - "hahaCount": 78, - "commentCount": 375, - "shareCount": 352, - "sadCount": 27, - "angryCount": 251, - "totalVideoTimeMS": 122878, - "postCount": 1, - }, - "owned_video": { - "loveCount": 5, - "totalInteractionCount": 1405, - "wowCount": 17, - "thankfulCount": 0, - "interactionRate": 0.3732874938062173, - "likeCount": 300, - "hahaCount": 78, - "commentCount": 375, - "shareCount": 352, - "sadCount": 27, - "angryCount": 251, - "totalVideoTimeMS": 122878, - "postCount": 1, - }, - "link": { - "shareCount": 227, - "loveCount": 7, - "totalInteractionCount": 1627, - "wowCount": 44, - "sadCount": 10, - "angryCount": 211, - "thankfulCount": 0, - "postCount": 1, - "interactionRate": 0.43226957467808935, - "likeCount": 412, - "hahaCount": 286, - "commentCount": 430, - }, - }, - "subscriberData": {"initialCount": 376390, "finalCount": 376381}, - }, - { - "account": { - "id": 9840, - "name": "CommonDreams", - "handle": "commondreams.org", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10469767_10152172973972016_8063428021861554001_n.jpg?_nc_cat=103&_nc_oc=AQlnGcnqptQQZZC2ssw_mVUJY3OL5CzMA_2hp5GQtIg_0HMCwMmn9q28KrUoNRmgbtU&_nc_ht=scontent.xx&oh=71d9a7a103ab2f477e27840eb92ac030&oe=5E040D0C", - "subscriberCount": 366664, - "url": "https://www.facebook.com/32109457015", - "platform": "Facebook", - "platformId": "32109457015", - "verified": True, - }, - "summary": { - "shareCount": 1051, - "loveCount": 177, - "totalInteractionCount": 3025, - "wowCount": 60, - "sadCount": 40, - "angryCount": 591, - "thankfulCount": 0, - "postCount": 8, - "interactionRate": 0.10309095595043452, - "likeCount": 923, - "hahaCount": 23, - "commentCount": 160, - }, - "breakdown": { - "link": { - "shareCount": 1051, - "loveCount": 177, - "totalInteractionCount": 3025, - "wowCount": 60, - "sadCount": 40, - "angryCount": 591, - "thankfulCount": 0, - "postCount": 8, - "interactionRate": 0.10309095595043452, - "likeCount": 923, - "hahaCount": 23, - "commentCount": 160, - } - }, - "subscriberData": {"initialCount": 366669, "finalCount": 366664}, - }, - ], - "pagination": {}, - }, + 'status': 200, + 'result': { + 'accountStatistics': [{ + 'account': { + 'id': 6897, + 'name': 'Fox News', + 'handle': 'FoxNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22519337_10156158270486336_6810712156586627746_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlXNhWwb8bfCyDXwZo8N1dsslewpEwDTilUVrDkK4ie4qoq_SHj__a9Ws-O0Hsa97M&_nc_ht=scontent.xx&oh=485819e2e49151fcf033722359d3e1a7&oe=5DFF0F55', + 'subscriberCount': 17163279, + 'url': 'https://www.facebook.com/15704546335', + 'platform': 'Facebook', + 'platformId': '15704546335', + 'verified': True + }, + 'summary': { + 'loveCount': 47232, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 572261, + 'wowCount': 22391, + 'thankfulCount': 0, + 'interactionRate': 0.0694641272581413, + 'likeCount': 226852, + 'hahaCount': 41859, + 'commentCount': 84712, + 'shareCount': 89770, + 'sadCount': 15405, + 'angryCount': 44040, + 
'totalVideoTimeMS': 914338, + 'postCount': 48 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 1167, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 12545, + 'wowCount': 852, + 'thankfulCount': 0, + 'interactionRate': 0.018272060315511752, + 'likeCount': 6028, + 'hahaCount': 731, + 'commentCount': 903, + 'shareCount': 2654, + 'sadCount': 28, + 'angryCount': 182, + 'totalVideoTimeMS': 486605, + 'postCount': 4 + }, + 'owned_video': { + 'loveCount': 1347, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 23723, + 'wowCount': 916, + 'thankfulCount': 0, + 'interactionRate': 0.02303235153929144, + 'likeCount': 7203, + 'hahaCount': 3204, + 'commentCount': 6396, + 'shareCount': 2991, + 'sadCount': 109, + 'angryCount': 1557, + 'totalVideoTimeMS': 486605, + 'postCount': 6 + }, + 'crosspost': { + 'shareCount': 337, + 'loveCount': 180, + 'totalInteractionCount': 11178, + 'wowCount': 64, + 'sadCount': 81, + 'angryCount': 1375, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.03256458708654183, + 'likeCount': 1175, + 'hahaCount': 2473, + 'commentCount': 5493 + }, + 'link': { + 'shareCount': 59011, + 'loveCount': 19647, + 'totalInteractionCount': 299339, + 'wowCount': 19416, + 'sadCount': 10916, + 'angryCount': 29091, + 'thankfulCount': 0, + 'postCount': 35, + 'interactionRate': 0.049828654278780776, + 'likeCount': 105531, + 'hahaCount': 18405, + 'commentCount': 37322 + }, + 'photo': { + 'shareCount': 27536, + 'loveCount': 26159, + 'totalInteractionCount': 247051, + 'wowCount': 1991, + 'sadCount': 4208, + 'angryCount': 13375, + 'thankfulCount': 0, + 'postCount': 6, + 'interactionRate': 0.23990818988877438, + 'likeCount': 112630, + 'hahaCount': 20239, + 'commentCount': 40913 + }, + 'share': { + 'loveCount': 79, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 2148, + 'wowCount': 68, + 'thankfulCount': 0, + 'interactionRate': 0.01251542906815027, + 'likeCount': 1488, + 'hahaCount': 11, + 'commentCount': 81, + 'shareCount': 232, + 'sadCount': 172, + 'angryCount': 17, + 'totalVideoTimeMS': 427733, + 'postCount': 1 + } + }, + 'subscriberData': { + 'initialCount': 17162352, + 'finalCount': 17163279 + } + }, { + 'account': { + 'id': 12431, + 'name': 'Occupy Democrats', + 'handle': 'OccupyDemocrats', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/45537032_2403174426442264_4326005542884474880_n.png?_nc_cat=1&_nc_oc=AQk1aVoZuS5iEgQ8jJOAnRArWGeM1nOF7Qx0IOdloEmSEgqG2JK08hhJzKOuJJ3fHcw&_nc_ht=scontent.xx&oh=1020f841a2c9e7ceddc491ff0d6a83dc&oe=5E051B61', + 'subscriberCount': 7872091, + 'url': 'https://www.facebook.com/346937065399354', + 'platform': 'Facebook', + 'platformId': '346937065399354', + 'verified': True + }, + 'summary': { + 'loveCount': 11586, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 412395, + 'wowCount': 2699, + 'thankfulCount': 0, + 'interactionRate': 0.17463810331280705, + 'likeCount': 132321, + 'hahaCount': 74689, + 'commentCount': 27137, + 'shareCount': 103448, + 'sadCount': 37188, + 'angryCount': 23327, + 'totalVideoTimeMS': 446099, + 'postCount': 30 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 147, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 14936, + 'wowCount': 241, + 'thankfulCount': 0, + 'interactionRate': 0.18975663546341384, + 'likeCount': 1866, + 'hahaCount': 3389, + 'commentCount': 4775, + 'shareCount': 2215, + 'sadCount': 184, + 'angryCount': 2119, + 'totalVideoTimeMS': 446099, + 'postCount': 1 + }, + 'crosspost': { + 'loveCount': 147, + 'threePlusMinuteVideoCount': 
1, + 'totalInteractionCount': 14936, + 'wowCount': 241, + 'thankfulCount': 0, + 'interactionRate': 0.18975663546341384, + 'likeCount': 1866, + 'hahaCount': 3389, + 'commentCount': 4775, + 'shareCount': 2215, + 'sadCount': 184, + 'angryCount': 2119, + 'totalVideoTimeMS': 446099, + 'postCount': 1 + }, + 'link': { + 'shareCount': 4074, + 'loveCount': 1045, + 'totalInteractionCount': 24781, + 'wowCount': 678, + 'sadCount': 373, + 'angryCount': 3896, + 'thankfulCount': 0, + 'postCount': 13, + 'interactionRate': 0.024215060738702915, + 'likeCount': 9224, + 'hahaCount': 1646, + 'commentCount': 3845 + }, + 'photo': { + 'shareCount': 97159, + 'loveCount': 10394, + 'totalInteractionCount': 372678, + 'wowCount': 1780, + 'sadCount': 36631, + 'angryCount': 17312, + 'thankfulCount': 0, + 'postCount': 16, + 'interactionRate': 0.29591668138817856, + 'likeCount': 121231, + 'hahaCount': 69654, + 'commentCount': 18517 + } + }, + 'subscriberData': { + 'initialCount': 7870178, + 'finalCount': 7872091 + } + }, { + 'account': { + 'id': 8323, + 'name': 'CNN', + 'handle': 'cnn', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12289622_10154246192721509_1897912583584847639_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnmWKpivHkplQlHvH6RU7ER1noSOq6saypKUuDbSnV0FWNYEYghmJGPxBpmhJO8UsU&_nc_ht=scontent.xx&oh=12e2b35de35132a27c2772d3fe565936&oe=5DF3AC02', + 'subscriberCount': 31389797, + 'url': 'https://www.facebook.com/5550296508', + 'platform': 'Facebook', + 'platformId': '5550296508', + 'verified': True + }, + 'summary': { + 'loveCount': 40425, + 'threePlusMinuteVideoCount': 19, + 'totalInteractionCount': 343516, + 'wowCount': 14626, + 'thankfulCount': 0, + 'interactionRate': 0.024317314415293154, + 'likeCount': 146903, + 'hahaCount': 15613, + 'commentCount': 32704, + 'shareCount': 65487, + 'sadCount': 19242, + 'angryCount': 8516, + 'totalVideoTimeMS': 5541666, + 'postCount': 45 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 6286, + 'threePlusMinuteVideoCount': 18, + 'totalInteractionCount': 101691, + 'wowCount': 3110, + 'thankfulCount': 0, + 'interactionRate': 0.01799666043914464, + 'likeCount': 43832, + 'hahaCount': 2764, + 'commentCount': 13059, + 'shareCount': 21216, + 'sadCount': 8587, + 'angryCount': 2837, + 'totalVideoTimeMS': 5302033, + 'postCount': 18 + }, + 'crosspost': { + 'loveCount': 6286, + 'threePlusMinuteVideoCount': 18, + 'totalInteractionCount': 101691, + 'wowCount': 3110, + 'thankfulCount': 0, + 'interactionRate': 0.01799666043914464, + 'likeCount': 43832, + 'hahaCount': 2764, + 'commentCount': 13059, + 'shareCount': 21216, + 'sadCount': 8587, + 'angryCount': 2837, + 'totalVideoTimeMS': 5302033, + 'postCount': 18 + }, + 'link': { + 'shareCount': 44085, + 'loveCount': 34111, + 'totalInteractionCount': 240792, + 'wowCount': 11485, + 'sadCount': 10649, + 'angryCount': 5675, + 'thankfulCount': 0, + 'postCount': 25, + 'interactionRate': 0.030682569780386266, + 'likeCount': 102525, + 'hahaCount': 12792, + 'commentCount': 19470 + }, + 'share': { + 'loveCount': 28, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 1033, + 'wowCount': 31, + 'thankfulCount': 0, + 'interactionRate': 0.0016438797639579806, + 'likeCount': 546, + 'hahaCount': 57, + 'commentCount': 175, + 'shareCount': 186, + 'sadCount': 6, + 'angryCount': 4, + 'totalVideoTimeMS': 239633, + 'postCount': 2 + } + }, + 'subscriberData': { + 'initialCount': 31388517, + 'finalCount': 31389797 + } + }, { + 'account': { + 'id': 6893, + 'name': 'Breitbart', + 'handle': 'Breitbart', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/52553630_10162546333950354_957315555642048512_n.jpg?_nc_cat=1&_nc_oc=AQk8nx3izP_SkwUd5mI2QxO4HjJhMpRt5ggDp60YEZ10o7ZdAbnqH-Zl61Z9gCXAUT8&_nc_ht=scontent.xx&oh=163e2b6e11a05fc602bdbfbe245a4a0e&oe=5E125439', + 'subscriberCount': 4051767, + 'url': 'https://www.facebook.com/95475020353', + 'platform': 'Facebook', + 'platformId': '95475020353', + 'verified': True + }, + 'summary': { + 'shareCount': 34233, + 'loveCount': 2121, + 'totalInteractionCount': 180913, + 'wowCount': 6217, + 'sadCount': 7025, + 'angryCount': 35424, + 'thankfulCount': 0, + 'postCount': 44, + 'interactionRate': 0.10146013999821069, + 'likeCount': 33695, + 'hahaCount': 21554, + 'commentCount': 40644 + }, + 'breakdown': { + 'link': { + 'shareCount': 32055, + 'loveCount': 1158, + 'totalInteractionCount': 168057, + 'wowCount': 6134, + 'sadCount': 7023, + 'angryCount': 35416, + 'thankfulCount': 0, + 'postCount': 43, + 'interactionRate': 0.0964500674076885, + 'likeCount': 24757, + 'hahaCount': 21518, + 'commentCount': 39996 + }, + 'photo': { + 'shareCount': 2178, + 'loveCount': 963, + 'totalInteractionCount': 12856, + 'wowCount': 83, + 'sadCount': 2, + 'angryCount': 8, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.31728814395937643, + 'likeCount': 8938, + 'hahaCount': 36, + 'commentCount': 648 + } + }, + 'subscriberData': { + 'initialCount': 4051908, + 'finalCount': 4051767 + } + }, { + 'account': { + 'id': 7132, + 'name': 'The New York Times', + 'handle': 'nytimes', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57', + 'subscriberCount': 16854715, + 'url': 'https://www.facebook.com/5281959998', + 'platform': 'Facebook', + 'platformId': '5281959998', + 'verified': True + }, + 'summary': { + 'shareCount': 17541, + 'loveCount': 3889, + 'totalInteractionCount': 102490, + 'wowCount': 6687, + 'sadCount': 18117, + 'angryCount': 13956, + 'thankfulCount': 0, + 'postCount': 61, + 'interactionRate': 0.009967688669212105, + 'likeCount': 30490, + 'hahaCount': 3317, + 'commentCount': 8493 + }, + 'breakdown': { + 'link': { + 'shareCount': 17541, + 'loveCount': 3889, + 'totalInteractionCount': 102490, + 'wowCount': 6687, + 'sadCount': 18117, + 'angryCount': 13956, + 'thankfulCount': 0, + 'postCount': 61, + 'interactionRate': 0.009967688669212105, + 'likeCount': 30490, + 'hahaCount': 3317, + 'commentCount': 8493 + } + }, + 'subscriberData': { + 'initialCount': 16854203, + 'finalCount': 16854715 + } + }, { + 'account': { + 'id': 6149, + 'name': 'NPR', + 'handle': 'NPR', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/392453_10150756268711756_1078337478_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkCimbrOrcgFhsAxAA1U5koNLGX9OLyOXdvEKxfRI0_6KYiFljw87Kls85nrj6clWA&_nc_ht=scontent.xx&oh=1883b0436c2dd854062b47c02250e87b&oe=5DF7D154', + 'subscriberCount': 6596236, + 'url': 'https://www.facebook.com/10643211755', + 'platform': 'Facebook', + 'platformId': '10643211755', + 'verified': True + }, + 'summary': { + 'shareCount': 19847, + 'loveCount': 2069, + 'totalInteractionCount': 101386, + 'wowCount': 7700, + 'sadCount': 19013, + 'angryCount': 19064, + 'thankfulCount': 0, + 'postCount': 24, + 'interactionRate': 0.06403652992957347, + 'likeCount': 21241, + 'hahaCount': 3811, + 'commentCount': 8641 + }, + 'breakdown': { + 'link': { + 'shareCount': 19847, + 
'loveCount': 2069, + 'totalInteractionCount': 101386, + 'wowCount': 7700, + 'sadCount': 19013, + 'angryCount': 19064, + 'thankfulCount': 0, + 'postCount': 24, + 'interactionRate': 0.06403652992957347, + 'likeCount': 21241, + 'hahaCount': 3811, + 'commentCount': 8641 + } + }, + 'subscriberData': { + 'initialCount': 6596234, + 'finalCount': 6596236 + } + }, { + 'account': { + 'id': 279876, + 'name': 'Mad World News', + 'handle': 'MadWorldNewsCorp', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16649435_1331399193565304_7598140519586777175_n.png?_nc_cat=1&_nc_oc=AQkFf7jm82V9pnSg1x0Pqt0rlA2Yl-XqrdIF4h-iVA0BzRc8fXvud27Fd5_bf3n4adY&_nc_ht=scontent.xx&oh=db1ac67cb2a4dc589f0e879b97477ebd&oe=5E151F16', + 'subscriberCount': 2135169, + 'url': 'https://www.facebook.com/513813158657249', + 'platform': 'Facebook', + 'platformId': '513813158657249', + 'verified': False + }, + 'summary': { + 'shareCount': 30872, + 'loveCount': 4854, + 'totalInteractionCount': 98090, + 'wowCount': 269, + 'sadCount': 847, + 'angryCount': 3939, + 'thankfulCount': 0, + 'postCount': 18, + 'interactionRate': 0.2552741350943142, + 'likeCount': 47861, + 'hahaCount': 494, + 'commentCount': 8954 + }, + 'breakdown': { + 'link': { + 'shareCount': 4373, + 'loveCount': 331, + 'totalInteractionCount': 15848, + 'wowCount': 213, + 'sadCount': 370, + 'angryCount': 3925, + 'thankfulCount': 0, + 'postCount': 14, + 'interactionRate': 0.053031807841211906, + 'likeCount': 2660, + 'hahaCount': 48, + 'commentCount': 3928 + }, + 'photo': { + 'shareCount': 26499, + 'loveCount': 4523, + 'totalInteractionCount': 82242, + 'wowCount': 56, + 'sadCount': 477, + 'angryCount': 14, + 'thankfulCount': 0, + 'postCount': 4, + 'interactionRate': 0.9631925523103504, + 'likeCount': 45201, + 'hahaCount': 446, + 'commentCount': 5026 + } + }, + 'subscriberData': { + 'initialCount': 2133967, + 'finalCount': 2135169 + } + }, { + 'account': { + 'id': 13878, + 'name': 'ABC News', + 'handle': 'ABCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02', + 'subscriberCount': 14196629, + 'url': 'https://www.facebook.com/86680728811', + 'platform': 'Facebook', + 'platformId': '86680728811', + 'verified': True + }, + 'summary': { + 'loveCount': 7418, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 92879, + 'wowCount': 4940, + 'thankfulCount': 0, + 'interactionRate': 0.00991103629816666, + 'likeCount': 31672, + 'hahaCount': 5093, + 'commentCount': 13711, + 'shareCount': 14739, + 'sadCount': 9408, + 'angryCount': 5898, + 'totalVideoTimeMS': 446345, + 'postCount': 66 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 3, + 'totalInteractionCount': 2737, + 'wowCount': 1029, + 'thankfulCount': 0, + 'interactionRate': 0.019279677575040614, + 'likeCount': 314, + 'hahaCount': 14, + 'commentCount': 234, + 'shareCount': 871, + 'sadCount': 270, + 'angryCount': 2, + 'totalVideoTimeMS': 36094, + 'postCount': 1 + }, + 'owned_video': { + 'loveCount': 3449, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 22749, + 'wowCount': 1965, + 'thankfulCount': 0, + 'interactionRate': 0.026704149684683584, + 'likeCount': 9557, + 'hahaCount': 227, + 'commentCount': 3519, + 'shareCount': 3383, + 'sadCount': 555, + 'angryCount': 94, + 'totalVideoTimeMS': 446345, + 'postCount': 6 + }, + 'crosspost': { + 'loveCount': 3446, + 
'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 20012, + 'wowCount': 936, + 'thankfulCount': 0, + 'interactionRate': 0.028190452924849306, + 'likeCount': 9243, + 'hahaCount': 213, + 'commentCount': 3285, + 'shareCount': 2512, + 'sadCount': 285, + 'angryCount': 92, + 'totalVideoTimeMS': 410251, + 'postCount': 5 + }, + 'link': { + 'shareCount': 11189, + 'loveCount': 3968, + 'totalInteractionCount': 68995, + 'wowCount': 2699, + 'sadCount': 8643, + 'angryCount': 5801, + 'thankfulCount': 0, + 'postCount': 59, + 'interactionRate': 0.008234542595989214, + 'likeCount': 21775, + 'hahaCount': 4861, + 'commentCount': 10059 + }, + 'photo': { + 'shareCount': 167, + 'loveCount': 1, + 'totalInteractionCount': 1135, + 'wowCount': 276, + 'sadCount': 210, + 'angryCount': 3, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.007995043495678152, + 'likeCount': 340, + 'hahaCount': 5, + 'commentCount': 133 + } + }, + 'subscriberData': { + 'initialCount': 14195962, + 'finalCount': 14196629 + } + }, { + 'account': { + 'id': 48728, + 'name': 'Faves USA', + 'handle': 'thefavesusa', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A', + 'subscriberCount': 6323373, + 'url': 'https://www.facebook.com/532854420074062', + 'platform': 'Facebook', + 'platformId': '532854420074062', + 'verified': True + }, + 'summary': { + 'loveCount': 4461, + 'totalInteractionCount': 86313, + 'wowCount': 1431, + 'thankfulCount': 0, + 'interactionRate': 0.04135428564425114, + 'likeCount': 28142, + 'hahaCount': 8631, + 'commentCount': 13803, + 'shareCount': 25692, + 'sadCount': 1700, + 'angryCount': 2453, + 'totalVideoTimeMS': 298802, + 'postCount': 33 + }, + 'breakdown': { + 'link': { + 'shareCount': 10859, + 'loveCount': 2743, + 'totalInteractionCount': 37660, + 'wowCount': 1198, + 'sadCount': 1579, + 'angryCount': 2400, + 'thankfulCount': 0, + 'postCount': 23, + 'interactionRate': 0.02588794095588494, + 'likeCount': 11476, + 'hahaCount': 2813, + 'commentCount': 4592 + }, + 'photo': { + 'shareCount': 865, + 'loveCount': 257, + 'totalInteractionCount': 12125, + 'wowCount': 33, + 'sadCount': 64, + 'angryCount': 48, + 'thankfulCount': 0, + 'postCount': 3, + 'interactionRate': 0.06390541808352537, + 'likeCount': 3149, + 'hahaCount': 104, + 'commentCount': 7605 + }, + 'share': { + 'loveCount': 1461, + 'totalInteractionCount': 36528, + 'wowCount': 200, + 'thankfulCount': 0, + 'interactionRate': 0.08251880018803152, + 'likeCount': 13517, + 'hahaCount': 5714, + 'commentCount': 1606, + 'shareCount': 13968, + 'sadCount': 57, + 'angryCount': 5, + 'totalVideoTimeMS': 298802, + 'postCount': 7 + } + }, + 'subscriberData': { + 'initialCount': 6323442, + 'finalCount': 6323373 + } + }, { + 'account': { + 'id': 16403, + 'name': 'BBC News', + 'handle': 'bbcnews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/67191311_10156857876272217_4342089529688064000_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQk5kAdrSFMzze_w-lzADmQENwckqsjInhGPXnxTYNgxJpQ7siiGF44i0wivzxfUmPw&_nc_ht=scontent.xx&oh=5b9721d79e733db34cd496e566100993&oe=5DF5BFA1', + 'subscriberCount': 49397882, + 'url': 'https://www.facebook.com/228735667216', + 'platform': 'Facebook', + 'platformId': '228735667216', + 'verified': True + }, + 'summary': { + 'loveCount': 4060, + 'threePlusMinuteVideoCount': 4, + 'totalInteractionCount': 85997, + 'wowCount': 
2481, + 'thankfulCount': 0, + 'interactionRate': 0.007569588922429944, + 'likeCount': 43029, + 'hahaCount': 6026, + 'commentCount': 11562, + 'shareCount': 12934, + 'sadCount': 4689, + 'angryCount': 1216, + 'totalVideoTimeMS': 1371143, + 'postCount': 23 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 971, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 12803, + 'wowCount': 74, + 'thankfulCount': 0, + 'interactionRate': 0.00863852258144118, + 'likeCount': 5000, + 'hahaCount': 887, + 'commentCount': 1711, + 'shareCount': 2232, + 'sadCount': 1888, + 'angryCount': 40, + 'totalVideoTimeMS': 608830, + 'postCount': 3 + }, + 'owned_video': { + 'loveCount': 2288, + 'threePlusMinuteVideoCount': 4, + 'totalInteractionCount': 36783, + 'wowCount': 926, + 'thankfulCount': 0, + 'interactionRate': 0.01063669970538832, + 'likeCount': 17727, + 'hahaCount': 1521, + 'commentCount': 3437, + 'shareCount': 7420, + 'sadCount': 3299, + 'angryCount': 165, + 'totalVideoTimeMS': 1371143, + 'postCount': 7 + }, + 'crosspost': { + 'loveCount': 1317, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 23980, + 'wowCount': 852, + 'thankfulCount': 0, + 'interactionRate': 0.012136850920023406, + 'likeCount': 12727, + 'hahaCount': 634, + 'commentCount': 1726, + 'shareCount': 5188, + 'sadCount': 1411, + 'angryCount': 125, + 'totalVideoTimeMS': 762313, + 'postCount': 4 + }, + 'link': { + 'shareCount': 5514, + 'loveCount': 1772, + 'totalInteractionCount': 49214, + 'wowCount': 1555, + 'sadCount': 1390, + 'angryCount': 1051, + 'thankfulCount': 0, + 'postCount': 16, + 'interactionRate': 0.006225323866400663, + 'likeCount': 25302, + 'hahaCount': 4505, + 'commentCount': 8125 + } + }, + 'subscriberData': { + 'initialCount': 49392159, + 'finalCount': 49397882 + } + }, { + 'account': { + 'id': 13489, + 'name': 'The Daily Caller', + 'handle': 'DailyCaller', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471', + 'subscriberCount': 5408115, + 'url': 'https://www.facebook.com/182919686769', + 'platform': 'Facebook', + 'platformId': '182919686769', + 'verified': True + }, + 'summary': { + 'shareCount': 10978, + 'loveCount': 1547, + 'totalInteractionCount': 83887, + 'wowCount': 1617, + 'sadCount': 1729, + 'angryCount': 8650, + 'thankfulCount': 0, + 'postCount': 52, + 'interactionRate': 0.02982468613123435, + 'likeCount': 21611, + 'hahaCount': 17071, + 'commentCount': 20684 + }, + 'breakdown': { + 'link': { + 'shareCount': 10839, + 'loveCount': 1540, + 'totalInteractionCount': 80480, + 'wowCount': 1600, + 'sadCount': 1711, + 'angryCount': 8392, + 'thankfulCount': 0, + 'postCount': 51, + 'interactionRate': 0.02917752927159814, + 'likeCount': 21302, + 'hahaCount': 15684, + 'commentCount': 19412 + }, + 'photo': { + 'shareCount': 139, + 'loveCount': 7, + 'totalInteractionCount': 3407, + 'wowCount': 17, + 'sadCount': 18, + 'angryCount': 258, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.06299609773658738, + 'likeCount': 309, + 'hahaCount': 1387, + 'commentCount': 1272 + } + }, + 'subscriberData': { + 'initialCount': 5408428, + 'finalCount': 5408115 + } + }, { + 'account': { + 'id': 8324, + 'name': 'MSNBC', + 'handle': 'msnbc', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2', + 'subscriberCount': 2290512, + 'url': 'https://www.facebook.com/273864989376427', + 'platform': 'Facebook', + 'platformId': '273864989376427', + 'verified': True + }, + 'summary': { + 'loveCount': 1671, + 'threePlusMinuteVideoCount': 4, + 'totalInteractionCount': 81269, + 'wowCount': 2954, + 'thankfulCount': 0, + 'interactionRate': 0.0437899097220585, + 'likeCount': 17184, + 'hahaCount': 3886, + 'commentCount': 12944, + 'shareCount': 17257, + 'sadCount': 4576, + 'angryCount': 20797, + 'totalVideoTimeMS': 1542583, + 'postCount': 81 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 2, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 893, + 'wowCount': 31, + 'thankfulCount': 0, + 'interactionRate': 0.019471884083786733, + 'likeCount': 120, + 'hahaCount': 2, + 'commentCount': 71, + 'shareCount': 147, + 'sadCount': 518, + 'angryCount': 2, + 'totalVideoTimeMS': 1000636, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 5, + 'threePlusMinuteVideoCount': 4, + 'totalInteractionCount': 2327, + 'wowCount': 58, + 'thankfulCount': 0, + 'interactionRate': 0.020301403809329214, + 'likeCount': 470, + 'hahaCount': 281, + 'commentCount': 415, + 'shareCount': 432, + 'sadCount': 544, + 'angryCount': 122, + 'totalVideoTimeMS': 1542583, + 'postCount': 5 + }, + 'crosspost': { + 'loveCount': 3, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 1434, + 'wowCount': 27, + 'thankfulCount': 0, + 'interactionRate': 0.020868969937331967, + 'likeCount': 350, + 'hahaCount': 279, + 'commentCount': 344, + 'shareCount': 285, + 'sadCount': 26, + 'angryCount': 120, + 'totalVideoTimeMS': 541947, + 'postCount': 3 + }, + 'link': { + 'shareCount': 16809, + 'loveCount': 1609, + 'totalInteractionCount': 78288, + 'wowCount': 2887, + 'sadCount': 4020, + 'angryCount': 20635, + 'thankfulCount': 0, + 'postCount': 74, + 'interactionRate': 0.04614749209991609, + 'likeCount': 16280, + 'hahaCount': 3593, + 'commentCount': 12455 + }, + 'photo': { + 'shareCount': 16, + 'loveCount': 57, + 'totalInteractionCount': 654, + 'wowCount': 9, + 'sadCount': 12, + 'angryCount': 40, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.014276471065915385, + 'likeCount': 434, + 'hahaCount': 12, + 'commentCount': 74 + } + }, + 'subscriberData': { + 'initialCount': 2290452, + 'finalCount': 2290512 + } + }, { + 'account': { + 'id': 311636, + 'name': 'NowThis Politics', + 'handle': 'NowThisPolitics', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28276603_1939096412788506_2850422809072819205_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlBSULvu9xr5smvB3kmRub5MfL3SpyPxNX94GEyc5skmb19swOR40nthDv1Kip3kcw&_nc_ht=scontent.xx&oh=b734d3faa39291c805198e3ad7de3450&oe=5DFF0890', + 'subscriberCount': 6074746, + 'url': 'https://www.facebook.com/908009612563863', + 'platform': 'Facebook', + 'platformId': '908009612563863', + 'verified': True + }, + 'summary': { + 'loveCount': 1871, + 'threePlusMinuteVideoCount': 13, + 'totalInteractionCount': 78197, + 'wowCount': 2485, + 'thankfulCount': 0, + 'interactionRate': 0.06435188115661188, + 'likeCount': 12525, + 'hahaCount': 4937, + 'commentCount': 12668, + 'shareCount': 25742, + 'sadCount': 1903, + 'angryCount': 16066, + 'totalVideoTimeMS': 5019375, + 'postCount': 20 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 
70, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 36820, + 'wowCount': 1089, + 'thankfulCount': 0, + 'interactionRate': 0.30307447738378734, + 'likeCount': 2722, + 'hahaCount': 195, + 'commentCount': 4278, + 'shareCount': 16490, + 'sadCount': 1151, + 'angryCount': 10825, + 'totalVideoTimeMS': 1091047, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 1792, + 'threePlusMinuteVideoCount': 10, + 'totalInteractionCount': 73717, + 'wowCount': 2373, + 'thankfulCount': 0, + 'interactionRate': 0.07584270055986465, + 'likeCount': 11977, + 'hahaCount': 3646, + 'commentCount': 11480, + 'shareCount': 24889, + 'sadCount': 1686, + 'angryCount': 15874, + 'totalVideoTimeMS': 4317388, + 'postCount': 16 + }, + 'crosspost': { + 'loveCount': 1722, + 'threePlusMinuteVideoCount': 9, + 'totalInteractionCount': 36897, + 'wowCount': 1284, + 'thankfulCount': 0, + 'interactionRate': 0.04337866637187831, + 'likeCount': 9255, + 'hahaCount': 3451, + 'commentCount': 7202, + 'shareCount': 8399, + 'sadCount': 535, + 'angryCount': 5049, + 'totalVideoTimeMS': 3226341, + 'postCount': 14 + }, + 'share': { + 'loveCount': 79, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 4480, + 'wowCount': 112, + 'thankfulCount': 0, + 'interactionRate': 0.018437991019546, + 'likeCount': 548, + 'hahaCount': 1291, + 'commentCount': 1188, + 'shareCount': 853, + 'sadCount': 217, + 'angryCount': 192, + 'totalVideoTimeMS': 701987, + 'postCount': 4 + } + }, + 'subscriberData': { + 'initialCount': 6074083, + 'finalCount': 6074746 + } + }, { + 'account': { + 'id': 13889, + 'name': 'NBC News', + 'handle': 'NBCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653', + 'subscriberCount': 9970540, + 'url': 'https://www.facebook.com/155869377766434', + 'platform': 'Facebook', + 'platformId': '155869377766434', + 'verified': True + }, + 'summary': { + 'loveCount': 1957, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 77341, + 'wowCount': 4953, + 'thankfulCount': 0, + 'interactionRate': 0.006679650864879389, + 'likeCount': 13740, + 'hahaCount': 4266, + 'commentCount': 10747, + 'shareCount': 14838, + 'sadCount': 16923, + 'angryCount': 9917, + 'totalVideoTimeMS': 908004, + 'postCount': 116 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 1, + 'totalInteractionCount': 306, + 'wowCount': 61, + 'thankfulCount': 0, + 'interactionRate': 0.0015345143878776975, + 'likeCount': 99, + 'hahaCount': 5, + 'commentCount': 39, + 'shareCount': 69, + 'sadCount': 31, + 'angryCount': 1, + 'totalVideoTimeMS': 23829, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 6, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 700, + 'wowCount': 86, + 'thankfulCount': 0, + 'interactionRate': 0.0014041308124371087, + 'likeCount': 234, + 'hahaCount': 9, + 'commentCount': 80, + 'shareCount': 169, + 'sadCount': 80, + 'angryCount': 36, + 'totalVideoTimeMS': 372083, + 'postCount': 5 + }, + 'crosspost': { + 'loveCount': 5, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 394, + 'wowCount': 25, + 'thankfulCount': 0, + 'interactionRate': 0.0013138652602090089, + 'likeCount': 135, + 'hahaCount': 4, + 'commentCount': 41, + 'shareCount': 100, + 'sadCount': 49, + 'angryCount': 35, + 'totalVideoTimeMS': 348254, + 'postCount': 3 + }, + 'link': { + 'shareCount': 14613, + 'loveCount': 1935, + 
'totalInteractionCount': 75918, + 'wowCount': 4848, + 'sadCount': 16618, + 'angryCount': 9873, + 'thankfulCount': 0, + 'postCount': 107, + 'interactionRate': 0.007110919614413644, + 'likeCount': 13294, + 'hahaCount': 4240, + 'commentCount': 10497 + }, + 'share': { + 'loveCount': 16, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 723, + 'wowCount': 19, + 'thankfulCount': 0, + 'interactionRate': 0.001805311044561997, + 'likeCount': 212, + 'hahaCount': 17, + 'commentCount': 170, + 'shareCount': 56, + 'sadCount': 225, + 'angryCount': 8, + 'totalVideoTimeMS': 535921, + 'postCount': 4 + } + }, + 'subscriberData': { + 'initialCount': 9970622, + 'finalCount': 9970540 + } + }, { + 'account': { + 'id': 15633, + 'name': 'The Hill', + 'handle': 'TheHill', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21463076_10155260793709087_1084657546973301538_n.png?_nc_cat=1&_nc_oc=AQnUzEj2cdNQuf4Zy5vyhBDVoFTJY2zVa3PSqT1fGgs9diAmCxedDkDI8vcWKhFV8jI&_nc_ht=scontent.xx&oh=7cdf47b74db6d8449753f4e74d7874fc&oe=5E028CD1', + 'subscriberCount': 1380989, + 'url': 'https://www.facebook.com/7533944086', + 'platform': 'Facebook', + 'platformId': '7533944086', + 'verified': True + }, + 'summary': { + 'shareCount': 10494, + 'loveCount': 4188, + 'totalInteractionCount': 73354, + 'wowCount': 1779, + 'sadCount': 1862, + 'angryCount': 7496, + 'thankfulCount': 0, + 'postCount': 53, + 'interactionRate': 0.10021734274150189, + 'likeCount': 30160, + 'hahaCount': 6470, + 'commentCount': 10905 + }, + 'breakdown': { + 'link': { + 'shareCount': 10494, + 'loveCount': 4188, + 'totalInteractionCount': 73354, + 'wowCount': 1779, + 'sadCount': 1862, + 'angryCount': 7496, + 'thankfulCount': 0, + 'postCount': 53, + 'interactionRate': 0.10021734274150189, + 'likeCount': 30160, + 'hahaCount': 6470, + 'commentCount': 10905 + } + }, + 'subscriberData': { + 'initialCount': 1381008, + 'finalCount': 1380989 + } + }, { + 'account': { + 'id': 93420, + 'name': 'The Western Journal', + 'handle': 'WesternJournal', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49664345_10157205261148984_1256195277434388480_n.png?_nc_cat=1&_nc_oc=AQkUo1GJrlGqxXcfjFxGkOcXookw_tgn8qATXCSI0ICt6sibuBdTtyIuuWj9iPLw5ZM&_nc_ht=scontent.xx&oh=bf010f921f678fbb0032a465900b5f24&oe=5DF8F16D', + 'subscriberCount': 5184899, + 'url': 'https://www.facebook.com/123624513983', + 'platform': 'Facebook', + 'platformId': '123624513983', + 'verified': True + }, + 'summary': { + 'shareCount': 16119, + 'loveCount': 3712, + 'totalInteractionCount': 65026, + 'wowCount': 858, + 'sadCount': 594, + 'angryCount': 1324, + 'thankfulCount': 0, + 'postCount': 36, + 'interactionRate': 0.034831203666881, + 'likeCount': 35643, + 'hahaCount': 1656, + 'commentCount': 5120 + }, + 'breakdown': { + 'link': { + 'shareCount': 4234, + 'loveCount': 1235, + 'totalInteractionCount': 24785, + 'wowCount': 799, + 'sadCount': 581, + 'angryCount': 1289, + 'thankfulCount': 0, + 'postCount': 34, + 'interactionRate': 0.014040485199052807, + 'likeCount': 11302, + 'hahaCount': 1460, + 'commentCount': 3885 + }, + 'photo': { + 'shareCount': 11885, + 'loveCount': 2477, + 'totalInteractionCount': 40241, + 'wowCount': 59, + 'sadCount': 13, + 'angryCount': 35, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.38804198105074517, + 'likeCount': 24341, + 'hahaCount': 196, + 'commentCount': 1235 + } + }, + 'subscriberData': { + 'initialCount': 5185113, + 'finalCount': 5184899 + } + }, { + 'account': { + 'id': 5860, + 'name': 'Being Liberal', + 'handle': 'beingliberal.org', + 
'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16865169_10154418564961275_3050958479071030073_n.png?_nc_cat=1&_nc_oc=AQlPF5wIrIXWCeRBPDA5P17NqQMaux6LCm9Ak8V6ktaSHP0ajoY7MreFOF-RleH_5sQ&_nc_ht=scontent.xx&oh=39015e43af0ae9881035d6aa4a9fe5fc&oe=5E0A093D', + 'subscriberCount': 1693705, + 'url': 'https://www.facebook.com/177486166274', + 'platform': 'Facebook', + 'platformId': '177486166274', + 'verified': True + }, + 'summary': { + 'shareCount': 13979, + 'loveCount': 1747, + 'totalInteractionCount': 58401, + 'wowCount': 1941, + 'sadCount': 3755, + 'angryCount': 13887, + 'thankfulCount': 0, + 'postCount': 37, + 'interactionRate': 0.09316871951757733, + 'likeCount': 12741, + 'hahaCount': 4326, + 'commentCount': 6025 + }, + 'breakdown': { + 'link': { + 'shareCount': 13383, + 'loveCount': 1724, + 'totalInteractionCount': 55876, + 'wowCount': 1931, + 'sadCount': 3745, + 'angryCount': 13876, + 'thankfulCount': 0, + 'postCount': 36, + 'interactionRate': 0.09163362020993664, + 'likeCount': 12116, + 'hahaCount': 3161, + 'commentCount': 5940 + }, + 'photo': { + 'shareCount': 596, + 'loveCount': 23, + 'totalInteractionCount': 2525, + 'wowCount': 10, + 'sadCount': 10, + 'angryCount': 11, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.14908175968433635, + 'likeCount': 625, + 'hahaCount': 1165, + 'commentCount': 85 + } + }, + 'subscriberData': { + 'initialCount': 1693698, + 'finalCount': 1693705 + } + }, { + 'account': { + 'id': 19065, + 'name': 'The Independent', + 'handle': 'TheIndependentOnline', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11051795_10152732082756636_6705742038347351188_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQmApCC_log9_TfPU5-TLVRKHyBo2YH6UPG2d6R-43r5u7HhElr7QPKk9J_AXR9q1Ac&_nc_ht=scontent.xx&oh=47ac79067cb2e33520f6920eb409611d&oe=5E0FED75', + 'subscriberCount': 8834731, + 'url': 'https://www.facebook.com/13312631635', + 'platform': 'Facebook', + 'platformId': '13312631635', + 'verified': True + }, + 'summary': { + 'loveCount': 3261, + 'totalInteractionCount': 57897, + 'wowCount': 1631, + 'thankfulCount': 0, + 'interactionRate': 0.009497613597231903, + 'likeCount': 21280, + 'hahaCount': 8014, + 'commentCount': 9023, + 'shareCount': 11766, + 'sadCount': 737, + 'angryCount': 2185, + 'totalVideoTimeMS': 375324, + 'postCount': 69 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 276, + 'totalInteractionCount': 6287, + 'wowCount': 215, + 'thankfulCount': 0, + 'interactionRate': 0.010165502992031287, + 'likeCount': 1951, + 'hahaCount': 675, + 'commentCount': 1599, + 'shareCount': 1159, + 'sadCount': 98, + 'angryCount': 314, + 'totalVideoTimeMS': 375324, + 'postCount': 7 + }, + 'crosspost': { + 'loveCount': 276, + 'totalInteractionCount': 6287, + 'wowCount': 215, + 'thankfulCount': 0, + 'interactionRate': 0.010165502992031287, + 'likeCount': 1951, + 'hahaCount': 675, + 'commentCount': 1599, + 'shareCount': 1159, + 'sadCount': 98, + 'angryCount': 314, + 'totalVideoTimeMS': 375324, + 'postCount': 7 + }, + 'link': { + 'shareCount': 6888, + 'loveCount': 1059, + 'totalInteractionCount': 29264, + 'wowCount': 843, + 'sadCount': 321, + 'angryCount': 319, + 'thankfulCount': 0, + 'postCount': 37, + 'interactionRate': 0.008942925794771399, + 'likeCount': 10342, + 'hahaCount': 4841, + 'commentCount': 4651 + }, + 'video': { + 'shareCount': 3719, + 'loveCount': 1926, + 'totalInteractionCount': 22346, + 'wowCount': 573, + 'sadCount': 318, + 'angryCount': 1552, + 'thankfulCount': 0, + 'postCount': 25, + 'interactionRate': 0.010108902195861849, + 
'likeCount': 8987, + 'hahaCount': 2498, + 'commentCount': 2773 + } + }, + 'subscriberData': { + 'initialCount': 8832865, + 'finalCount': 8834731 + } + }, { + 'account': { + 'id': 48734, + 'name': 'Young Conservatives', + 'handle': 'TheYoungCons', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/45427184_10155855954205841_8373169778709233664_n.jpg?_nc_cat=1&_nc_oc=AQmAgxZhqj9CXmiY228VRUATEHxELlR7p8BpNguYxOU_n6uxWw17ssXZSIF2mv2DreA&_nc_ht=scontent.xx&oh=ea10aeb60d4d31efb95e2c0a9f7ee098&oe=5DFE69A3', + 'subscriberCount': 1000057, + 'url': 'https://www.facebook.com/147772245840', + 'platform': 'Facebook', + 'platformId': '147772245840', + 'verified': False + }, + 'summary': { + 'loveCount': 1973, + 'totalInteractionCount': 55399, + 'wowCount': 344, + 'thankfulCount': 0, + 'interactionRate': 0.27690138450692253, + 'likeCount': 16735, + 'hahaCount': 4505, + 'commentCount': 21226, + 'shareCount': 6426, + 'sadCount': 1039, + 'angryCount': 3151, + 'totalVideoTimeMS': 28928, + 'postCount': 20 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 2, + 'totalInteractionCount': 87, + 'wowCount': 0, + 'thankfulCount': 0, + 'interactionRate': 0.0043000215001075, + 'likeCount': 63, + 'hahaCount': 1, + 'commentCount': 14, + 'shareCount': 6, + 'sadCount': 0, + 'angryCount': 1, + 'totalVideoTimeMS': 28928, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 2, + 'totalInteractionCount': 87, + 'wowCount': 0, + 'thankfulCount': 0, + 'interactionRate': 0.0043000215001075, + 'likeCount': 63, + 'hahaCount': 1, + 'commentCount': 14, + 'shareCount': 6, + 'sadCount': 0, + 'angryCount': 1, + 'totalVideoTimeMS': 28928, + 'postCount': 2 + }, + 'link': { + 'shareCount': 231, + 'loveCount': 70, + 'totalInteractionCount': 2764, + 'wowCount': 35, + 'sadCount': 61, + 'angryCount': 282, + 'thankfulCount': 0, + 'postCount': 9, + 'interactionRate': 0.030700153500767505, + 'likeCount': 880, + 'hahaCount': 465, + 'commentCount': 740 + }, + 'photo': { + 'shareCount': 6189, + 'loveCount': 1901, + 'totalInteractionCount': 52548, + 'wowCount': 309, + 'sadCount': 978, + 'angryCount': 2868, + 'thankfulCount': 0, + 'postCount': 9, + 'interactionRate': 0.5838029190145951, + 'likeCount': 15792, + 'hahaCount': 4039, + 'commentCount': 20472 + } + }, + 'subscriberData': { + 'initialCount': 999933, + 'finalCount': 1000057 + } + }, { + 'account': { + 'id': 48733, + 'name': 'Conservative Tribune by WJ', + 'handle': 'theconservativetribune', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46353000_2202571529821371_2816384259860725760_n.png?_nc_cat=1&_nc_oc=AQmyLmtQSJjNV6pRGGi1jlDx51XV7ULbxHYoibyNBKmronK_dpS07OVljvF5-BdX07s&_nc_ht=scontent.xx&oh=eeade969630115fc0c1ec64d4a462e0f&oe=5DF58CE0', + 'subscriberCount': 4272095, + 'url': 'https://www.facebook.com/519305544814653', + 'platform': 'Facebook', + 'platformId': '519305544814653', + 'verified': True + }, + 'summary': { + 'shareCount': 13762, + 'loveCount': 3067, + 'totalInteractionCount': 54701, + 'wowCount': 483, + 'sadCount': 429, + 'angryCount': 2003, + 'thankfulCount': 0, + 'postCount': 21, + 'interactionRate': 0.06095214554361168, + 'likeCount': 29598, + 'hahaCount': 1289, + 'commentCount': 4070 + }, + 'breakdown': { + 'link': { + 'shareCount': 3972, + 'loveCount': 668, + 'totalInteractionCount': 19515, + 'wowCount': 438, + 'sadCount': 421, + 'angryCount': 1982, + 'thankfulCount': 0, + 'postCount': 19, + 'interactionRate': 0.024039114237054224, + 'likeCount': 7802, + 'hahaCount': 1166, + 'commentCount': 3066 + }, + 'photo': { + 'shareCount': 
9790, + 'loveCount': 2399, + 'totalInteractionCount': 35186, + 'wowCount': 45, + 'sadCount': 8, + 'angryCount': 21, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.4118014963704917, + 'likeCount': 21796, + 'hahaCount': 123, + 'commentCount': 1004 + } + }, + 'subscriberData': { + 'initialCount': 4272313, + 'finalCount': 4272095 + } + }, { + 'account': { + 'id': 10337, + 'name': 'Washington Post', + 'handle': 'washingtonpost', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21430382_10156479428327293_4985425836902947855_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlVAdyvl5eHjwkppWx8pvifrl3XbqjhakYzwfQ1AHjPFaQPjFxNF4BbZq5BQ1nys4Y&_nc_ht=scontent.xx&oh=6cea07f8fc3edae1f7c743fc8997901c&oe=5DC8AB0A', + 'subscriberCount': 6289503, + 'url': 'https://www.facebook.com/6250307292', + 'platform': 'Facebook', + 'platformId': '6250307292', + 'verified': True + }, + 'summary': { + 'shareCount': 11671, + 'loveCount': 536, + 'totalInteractionCount': 50242, + 'wowCount': 2956, + 'sadCount': 2904, + 'angryCount': 11827, + 'thankfulCount': 0, + 'postCount': 27, + 'interactionRate': 0.029573864462979164, + 'likeCount': 12934, + 'hahaCount': 2905, + 'commentCount': 4509 + }, + 'breakdown': { + 'link': { + 'shareCount': 11671, + 'loveCount': 536, + 'totalInteractionCount': 50242, + 'wowCount': 2956, + 'sadCount': 2904, + 'angryCount': 11827, + 'thankfulCount': 0, + 'postCount': 27, + 'interactionRate': 0.029573864462979164, + 'likeCount': 12934, + 'hahaCount': 2905, + 'commentCount': 4509 + } + }, + 'subscriberData': { + 'initialCount': 6289171, + 'finalCount': 6289503 + } + }, { + 'account': { + 'id': 5862, + 'name': 'BuzzFeed', + 'handle': 'BuzzFeed', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11222622_10153870407270329_4094729505669388790_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlaEUp906VUUmeEPgfBCNmaczf4owSg6ehvwRebY_UVmSGVjDB_IUr4WGPgzRnptXU&_nc_ht=scontent.xx&oh=96b0a01485175975acdaeb06feb9d222&oe=5E06A54B', + 'subscriberCount': 11870650, + 'url': 'https://www.facebook.com/21898300328', + 'platform': 'Facebook', + 'platformId': '21898300328', + 'verified': True + }, + 'summary': { + 'loveCount': 2960, + 'threePlusMinuteVideoCount': 8, + 'totalInteractionCount': 48466, + 'wowCount': 2861, + 'thankfulCount': 0, + 'interactionRate': 0.006798235407223357, + 'likeCount': 20456, + 'hahaCount': 4320, + 'commentCount': 7550, + 'shareCount': 6343, + 'sadCount': 590, + 'angryCount': 3386, + 'totalVideoTimeMS': 4256816, + 'postCount': 60 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 405, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 3724, + 'wowCount': 389, + 'thankfulCount': 0, + 'interactionRate': 0.01045428765844385, + 'likeCount': 1849, + 'hahaCount': 30, + 'commentCount': 479, + 'shareCount': 572, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 755856, + 'postCount': 3 + }, + 'owned_video': { + 'loveCount': 421, + 'threePlusMinuteVideoCount': 6, + 'totalInteractionCount': 3879, + 'wowCount': 391, + 'thankfulCount': 0, + 'interactionRate': 0.005441957959189949, + 'likeCount': 1948, + 'hahaCount': 40, + 'commentCount': 487, + 'shareCount': 591, + 'sadCount': 0, + 'angryCount': 1, + 'totalVideoTimeMS': 2301642, + 'postCount': 6 + }, + 'crosspost': { + 'loveCount': 16, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 155, + 'wowCount': 2, + 'thankfulCount': 0, + 'interactionRate': 0.00042962825993604853, + 'likeCount': 99, + 'hahaCount': 10, + 'commentCount': 8, + 'shareCount': 19, + 'sadCount': 0, + 'angryCount': 1, + 
'totalVideoTimeMS': 1545786, + 'postCount': 3 + }, + 'link': { + 'shareCount': 4693, + 'loveCount': 2254, + 'totalInteractionCount': 38029, + 'wowCount': 1877, + 'sadCount': 589, + 'angryCount': 3383, + 'thankfulCount': 0, + 'postCount': 52, + 'interactionRate': 0.006158005059083363, + 'likeCount': 16435, + 'hahaCount': 4247, + 'commentCount': 4551 + }, + 'share': { + 'loveCount': 285, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 6558, + 'wowCount': 593, + 'thankfulCount': 0, + 'interactionRate': 0.027622569888829475, + 'likeCount': 2073, + 'hahaCount': 33, + 'commentCount': 2512, + 'shareCount': 1059, + 'sadCount': 1, + 'angryCount': 2, + 'totalVideoTimeMS': 1955174, + 'postCount': 2 + } + }, + 'subscriberData': { + 'initialCount': 11870805, + 'finalCount': 11870650 + } + }, { + 'account': { + 'id': 14655, + 'name': 'CBS News', + 'handle': 'CBSNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB', + 'subscriberCount': 5892766, + 'url': 'https://www.facebook.com/131459315949', + 'platform': 'Facebook', + 'platformId': '131459315949', + 'verified': True + }, + 'summary': { + 'loveCount': 4010, + 'totalInteractionCount': 45029, + 'wowCount': 1898, + 'thankfulCount': 0, + 'interactionRate': 0.009418505700614213, + 'likeCount': 15748, + 'hahaCount': 3452, + 'commentCount': 7560, + 'shareCount': 7098, + 'sadCount': 2466, + 'angryCount': 2797, + 'totalVideoTimeMS': 460708, + 'postCount': 81 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 1530, + 'totalInteractionCount': 7475, + 'wowCount': 146, + 'thankfulCount': 0, + 'interactionRate': 0.015850242025898513, + 'likeCount': 3158, + 'hahaCount': 48, + 'commentCount': 519, + 'shareCount': 794, + 'sadCount': 969, + 'angryCount': 311, + 'totalVideoTimeMS': 460708, + 'postCount': 8 + }, + 'crosspost': { + 'loveCount': 1530, + 'totalInteractionCount': 7475, + 'wowCount': 146, + 'thankfulCount': 0, + 'interactionRate': 0.015850242025898513, + 'likeCount': 3158, + 'hahaCount': 48, + 'commentCount': 519, + 'shareCount': 794, + 'sadCount': 969, + 'angryCount': 311, + 'totalVideoTimeMS': 460708, + 'postCount': 8 + }, + 'link': { + 'shareCount': 1042, + 'loveCount': 239, + 'totalInteractionCount': 6094, + 'wowCount': 522, + 'sadCount': 395, + 'angryCount': 129, + 'thankfulCount': 0, + 'postCount': 23, + 'interactionRate': 0.004480154062994869, + 'likeCount': 2279, + 'hahaCount': 329, + 'commentCount': 1159 + }, + 'video': { + 'shareCount': 5262, + 'loveCount': 2241, + 'totalInteractionCount': 31460, + 'wowCount': 1230, + 'sadCount': 1102, + 'angryCount': 2357, + 'thankfulCount': 0, + 'postCount': 50, + 'interactionRate': 0.01067430646069611, + 'likeCount': 10311, + 'hahaCount': 3075, + 'commentCount': 5882 + } + }, + 'subscriberData': { + 'initialCount': 5892543, + 'finalCount': 5892766 + } + }, { + 'account': { + 'id': 3832, + 'name': 'MoveOn', + 'handle': 'moveon', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31206661_10155375246245493_295037061581229251_n.png?_nc_cat=1&_nc_oc=AQlSE1FqdCbaeopNV1yaNtJ3CmFLidqKES5CzQDuKERpCBGKUk_e3fO242Wi3KvNKSE&_nc_ht=scontent.xx&oh=ca9e5b7aef01fe823dc1929cfd53827d&oe=5E10EEAD', + 'subscriberCount': 1654130, + 'url': 'https://www.facebook.com/7292655492', + 'platform': 'Facebook', + 'platformId': '7292655492', + 'verified': True + }, 
+ 'summary': { + 'loveCount': 2867, + 'totalInteractionCount': 43301, + 'wowCount': 479, + 'thankfulCount': 0, + 'interactionRate': 0.11897438997664947, + 'likeCount': 18596, + 'hahaCount': 432, + 'commentCount': 5458, + 'shareCount': 8787, + 'sadCount': 2329, + 'angryCount': 4353, + 'totalVideoTimeMS': 91134, + 'postCount': 22 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 573, + 'totalInteractionCount': 6538, + 'wowCount': 23, + 'thankfulCount': 0, + 'interactionRate': 0.19762565083011538, + 'likeCount': 1206, + 'hahaCount': 202, + 'commentCount': 3324, + 'shareCount': 927, + 'sadCount': 28, + 'angryCount': 255, + 'totalVideoTimeMS': 91134, + 'postCount': 2 + }, + 'crosspost': { + 'loveCount': 573, + 'totalInteractionCount': 6538, + 'wowCount': 23, + 'thankfulCount': 0, + 'interactionRate': 0.19762565083011538, + 'likeCount': 1206, + 'hahaCount': 202, + 'commentCount': 3324, + 'shareCount': 927, + 'sadCount': 28, + 'angryCount': 255, + 'totalVideoTimeMS': 91134, + 'postCount': 2 + }, + 'link': { + 'shareCount': 6704, + 'loveCount': 1923, + 'totalInteractionCount': 28655, + 'wowCount': 341, + 'sadCount': 1916, + 'angryCount': 3679, + 'thankfulCount': 0, + 'postCount': 16, + 'interactionRate': 0.10821349494827366, + 'likeCount': 12332, + 'hahaCount': 160, + 'commentCount': 1600 + }, + 'photo': { + 'shareCount': 1156, + 'loveCount': 371, + 'totalInteractionCount': 8108, + 'wowCount': 115, + 'sadCount': 385, + 'angryCount': 419, + 'thankfulCount': 0, + 'postCount': 4, + 'interactionRate': 0.12254120349729089, + 'likeCount': 5058, + 'hahaCount': 70, + 'commentCount': 534 + } + }, + 'subscriberData': { + 'initialCount': 1654145, + 'finalCount': 1654130 + } + }, { + 'account': { + 'id': 3921, + 'name': 'The Rachel Maddow Show', + 'handle': 'therachelmaddowshow', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/560412_10150641324209067_326441500_n.jpg?_nc_cat=1&_nc_oc=AQll8voihNLTZqhNJxHo54RezqGbTpA2ADMeAJ0m1c--3__ynoI3yGrzvpSMzT6QrNI&_nc_ht=scontent.xx&oh=8f8d327dc4a47e1af85f9d3da82d4eb3&oe=5DFB0DA7', + 'subscriberCount': 2643600, + 'url': 'https://www.facebook.com/25987609066', + 'platform': 'Facebook', + 'platformId': '25987609066', + 'verified': True + }, + 'summary': { + 'shareCount': 10879, + 'loveCount': 42, + 'totalInteractionCount': 41298, + 'wowCount': 3593, + 'sadCount': 446, + 'angryCount': 15180, + 'thankfulCount': 0, + 'postCount': 3, + 'interactionRate': 0.5207293085186866, + 'likeCount': 3884, + 'hahaCount': 2154, + 'commentCount': 5120 + }, + 'breakdown': { + 'link': { + 'shareCount': 10879, + 'loveCount': 42, + 'totalInteractionCount': 41298, + 'wowCount': 3593, + 'sadCount': 446, + 'angryCount': 15180, + 'thankfulCount': 0, + 'postCount': 3, + 'interactionRate': 0.5207293085186866, + 'likeCount': 3884, + 'hahaCount': 2154, + 'commentCount': 5120 + } + }, + 'subscriberData': { + 'initialCount': 2643600, + 'finalCount': 2643600 + } + }, { + 'account': { + 'id': 5740, + 'name': 'The Guardian', + 'handle': 'theguardian', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/46160148_10157340584076323_3990431626264838144_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkKD6tb0oraHl_Qq9dA1S51ktyWhE9lPo7udOrFCRkfCctJldfDrwPVn7PcSDSY5Sc&_nc_ht=scontent.xx&oh=8c51a127f7d06b002a6fcba57abe5181&oe=5DFDE22E', + 'subscriberCount': 8186263, + 'url': 'https://www.facebook.com/10513336322', + 'platform': 'Facebook', + 'platformId': '10513336322', + 'verified': True + }, + 'summary': { + 'shareCount': 5542, + 'loveCount': 2243, + 'totalInteractionCount': 41152, + 'wowCount': 
1280, + 'sadCount': 2436, + 'angryCount': 2683, + 'thankfulCount': 0, + 'postCount': 62, + 'interactionRate': 0.008099022583568658, + 'likeCount': 19896, + 'hahaCount': 1919, + 'commentCount': 5153 + }, + 'breakdown': { + 'link': { + 'shareCount': 5542, + 'loveCount': 2243, + 'totalInteractionCount': 41152, + 'wowCount': 1280, + 'sadCount': 2436, + 'angryCount': 2683, + 'thankfulCount': 0, + 'postCount': 62, + 'interactionRate': 0.008099022583568658, + 'likeCount': 19896, + 'hahaCount': 1919, + 'commentCount': 5153 + } + }, + 'subscriberData': { + 'initialCount': 8186083, + 'finalCount': 8186263 + } + }, { + 'account': { + 'id': 1260835, + 'name': 'Democratic Coalition Against Trump', + 'handle': 'TheDemocraticCoalition', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16939431_599665190236860_5252814024528030433_n.png?_nc_cat=1&_nc_oc=AQlrHI03LbP9V0m-Es6d3p44SF5EnNO5lv0A3X_61rp1hvImkRh1UwXCXhOmUPsM4VQ&_nc_ht=scontent.xx&oh=418e568864bbd9412488d5ff27c67545&oe=5DCAD77C', + 'subscriberCount': 404090, + 'url': 'https://www.facebook.com/452797224923658', + 'platform': 'Facebook', + 'platformId': '452797224923658', + 'verified': False + }, + 'summary': { + 'shareCount': 8954, + 'loveCount': 1151, + 'totalInteractionCount': 38707, + 'wowCount': 539, + 'sadCount': 1101, + 'angryCount': 6925, + 'thankfulCount': 0, + 'postCount': 23, + 'interactionRate': 0.41631602395920997, + 'likeCount': 13869, + 'hahaCount': 2374, + 'commentCount': 3794 + }, + 'breakdown': { + 'link': { + 'shareCount': 3325, + 'loveCount': 287, + 'totalInteractionCount': 15744, + 'wowCount': 392, + 'sadCount': 337, + 'angryCount': 3378, + 'thankfulCount': 0, + 'postCount': 11, + 'interactionRate': 0.35419038661452407, + 'likeCount': 5126, + 'hahaCount': 901, + 'commentCount': 1998 + }, + 'photo': { + 'shareCount': 5629, + 'loveCount': 864, + 'totalInteractionCount': 22963, + 'wowCount': 147, + 'sadCount': 764, + 'angryCount': 3547, + 'thankfulCount': 0, + 'postCount': 12, + 'interactionRate': 0.4734914113162715, + 'likeCount': 8743, + 'hahaCount': 1473, + 'commentCount': 1796 + } + }, + 'subscriberData': { + 'initialCount': 403950, + 'finalCount': 404090 + } + }, { + 'account': { + 'id': 8806, + 'name': 'U.S. 
Senator Bernie Sanders', + 'handle': 'senatorsanders', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c78.0.200.200a/p200x200/400832_10151148541197908_1621512611_n.jpg?_nc_cat=1&_nc_oc=AQnOgGmDEpewZmHOa_WpZV3xQAnvB1i9gDx-nu5MzPilI0ir8LtVvORdwBDCvfzICSE&_nc_ht=scontent.xx&oh=a94d5b4a17b62ba41e432a7968447146&oe=5E07418B', + 'subscriberCount': 7504771, + 'url': 'https://www.facebook.com/9124187907', + 'platform': 'Facebook', + 'platformId': '9124187907', + 'verified': True + }, + 'summary': { + 'loveCount': 1231, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 36094, + 'wowCount': 423, + 'thankfulCount': 0, + 'interactionRate': 0.09617611341372971, + 'likeCount': 20544, + 'hahaCount': 139, + 'commentCount': 1454, + 'shareCount': 6851, + 'sadCount': 3025, + 'angryCount': 2427, + 'totalVideoTimeMS': 304380, + 'postCount': 5 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 466, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 13191, + 'wowCount': 302, + 'thankfulCount': 0, + 'interactionRate': 0.08787496092595559, + 'likeCount': 6492, + 'hahaCount': 76, + 'commentCount': 848, + 'shareCount': 2723, + 'sadCount': 254, + 'angryCount': 2030, + 'totalVideoTimeMS': 304380, + 'postCount': 2 + }, + 'crosspost': { + 'loveCount': 466, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 13191, + 'wowCount': 302, + 'thankfulCount': 0, + 'interactionRate': 0.08787496092595559, + 'likeCount': 6492, + 'hahaCount': 76, + 'commentCount': 848, + 'shareCount': 2723, + 'sadCount': 254, + 'angryCount': 2030, + 'totalVideoTimeMS': 304380, + 'postCount': 2 + }, + 'link': { + 'shareCount': 200, + 'loveCount': 141, + 'totalInteractionCount': 2317, + 'wowCount': 43, + 'sadCount': 156, + 'angryCount': 46, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.015429750531047243, + 'likeCount': 1501, + 'hahaCount': 32, + 'commentCount': 198 + }, + 'photo': { + 'shareCount': 3928, + 'loveCount': 624, + 'totalInteractionCount': 20586, + 'wowCount': 78, + 'sadCount': 2615, + 'angryCount': 351, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.27429779311929064, + 'likeCount': 12551, + 'hahaCount': 31, + 'commentCount': 408 + } + }, + 'subscriberData': { + 'initialCount': 7505193, + 'finalCount': 7504771 + } + }, { + 'account': { + 'id': 3919, + 'name': 'Upworthy', + 'handle': 'Upworthy', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1914363_1176320005742189_4709951186905632219_n.png?_nc_cat=1&_nc_oc=AQlPiX5mYxZC_Xj8_M4a7JZZvCD27izvAXTMtobXrLjwA4S5Pel-CsMh5GMouHt8LNg&_nc_ht=scontent.xx&oh=ba4e0db7c2521356dc17108d8aa4a12a&oe=5E04D944', + 'subscriberCount': 11752205, + 'url': 'https://www.facebook.com/354522044588660', + 'platform': 'Facebook', + 'platformId': '354522044588660', + 'verified': True + }, + 'summary': { + 'shareCount': 6787, + 'loveCount': 4789, + 'totalInteractionCount': 33998, + 'wowCount': 2581, + 'sadCount': 2794, + 'angryCount': 601, + 'thankfulCount': 0, + 'postCount': 10, + 'interactionRate': 0.028922232040710656, + 'likeCount': 14654, + 'hahaCount': 198, + 'commentCount': 1594 + }, + 'breakdown': { + 'link': { + 'shareCount': 6450, + 'loveCount': 2392, + 'totalInteractionCount': 26074, + 'wowCount': 2494, + 'sadCount': 2791, + 'angryCount': 601, + 'thankfulCount': 0, + 'postCount': 9, + 'interactionRate': 0.024650693210337974, + 'likeCount': 9648, + 'hahaCount': 195, + 'commentCount': 1503 + }, + 'photo': { + 'shareCount': 337, + 'loveCount': 2397, + 'totalInteractionCount': 7924, + 'wowCount': 87, + 'sadCount': 3, 
+ 'angryCount': 0, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.06742564480452817, + 'likeCount': 5006, + 'hahaCount': 3, + 'commentCount': 91 + } + }, + 'subscriberData': { + 'initialCount': 11752205, + 'finalCount': 11752205 + } + }, { + 'account': { + 'id': 16337, + 'name': 'Yahoo News', + 'handle': 'yahoonews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1234558_10151822723996037_1232781499_n.jpg?_nc_cat=1&_nc_oc=AQkPmfbCJFc9Ll_w6v-FBqGGulHvLsK6m9J20HAPS45YGyFGlkUw6ZZKS6yuaKxI_V0&_nc_ht=scontent.xx&oh=e2ffaa2bbb04dd746da7d26542134656&oe=5DFF9BED', + 'subscriberCount': 7865795, + 'url': 'https://www.facebook.com/338028696036', + 'platform': 'Facebook', + 'platformId': '338028696036', + 'verified': True + }, + 'summary': { + 'shareCount': 8090, + 'loveCount': 1049, + 'totalInteractionCount': 30391, + 'wowCount': 1730, + 'sadCount': 4389, + 'angryCount': 2623, + 'thankfulCount': 0, + 'postCount': 42, + 'interactionRate': 0.009191497800969112, + 'likeCount': 6682, + 'hahaCount': 1594, + 'commentCount': 4234 + }, + 'breakdown': { + 'link': { + 'shareCount': 8069, + 'loveCount': 1042, + 'totalInteractionCount': 29826, + 'wowCount': 1722, + 'sadCount': 4382, + 'angryCount': 2554, + 'thankfulCount': 0, + 'postCount': 41, + 'interactionRate': 0.00924234979433547, + 'likeCount': 6615, + 'hahaCount': 1467, + 'commentCount': 3975 + }, + 'photo': { + 'shareCount': 21, + 'loveCount': 7, + 'totalInteractionCount': 565, + 'wowCount': 8, + 'sadCount': 7, + 'angryCount': 69, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.007182844062997992, + 'likeCount': 67, + 'hahaCount': 127, + 'commentCount': 259 + } + }, + 'subscriberData': { + 'initialCount': 7866135, + 'finalCount': 7865795 + } + }, { + 'account': { + 'id': 51124, + 'name': 'Robert Reich', + 'handle': 'RBReich', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/14925611_1361055967240329_5201622253030095993_n.png?_nc_cat=1&_nc_oc=AQnnHM7tmaHqxLEWywroKLLlnR80odzpdoGwJG-9nfBPEgYbyMODen29YtOfkwesH5s&_nc_ht=scontent.xx&oh=ef9b38d97600f9e166d77c3ac2b47a40&oe=5DF447FA', + 'subscriberCount': 2450538, + 'url': 'https://www.facebook.com/142474049098533', + 'platform': 'Facebook', + 'platformId': '142474049098533', + 'verified': True + }, + 'summary': { + 'loveCount': 116, + 'totalInteractionCount': 28106, + 'wowCount': 598, + 'thankfulCount': 0, + 'interactionRate': 0.3822872226576185, + 'likeCount': 9539, + 'hahaCount': 2977, + 'commentCount': 1407, + 'shareCount': 7700, + 'sadCount': 1362, + 'angryCount': 4407, + 'totalVideoTimeMS': 141182, + 'postCount': 3 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 50, + 'totalInteractionCount': 4781, + 'wowCount': 48, + 'thankfulCount': 0, + 'interactionRate': 0.19510196536358604, + 'likeCount': 2047, + 'hahaCount': 26, + 'commentCount': 258, + 'shareCount': 1675, + 'sadCount': 242, + 'angryCount': 435, + 'totalVideoTimeMS': 141182, + 'postCount': 1 + }, + 'crosspost': { + 'loveCount': 50, + 'totalInteractionCount': 4781, + 'wowCount': 48, + 'thankfulCount': 0, + 'interactionRate': 0.19510196536358604, + 'likeCount': 2047, + 'hahaCount': 26, + 'commentCount': 258, + 'shareCount': 1675, + 'sadCount': 242, + 'angryCount': 435, + 'totalVideoTimeMS': 141182, + 'postCount': 1 + }, + 'link': { + 'shareCount': 3759, + 'loveCount': 10, + 'totalInteractionCount': 10777, + 'wowCount': 476, + 'sadCount': 167, + 'angryCount': 3616, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.439785375595768, + 'likeCount': 2227, + 'hahaCount': 
17, + 'commentCount': 505 + }, + 'photo': { + 'shareCount': 2266, + 'loveCount': 56, + 'totalInteractionCount': 12548, + 'wowCount': 74, + 'sadCount': 953, + 'angryCount': 356, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.5120559425606103, + 'likeCount': 5265, + 'hahaCount': 2934, + 'commentCount': 644 + } + }, + 'subscriberData': { + 'initialCount': 2450489, + 'finalCount': 2450538 + } + }, { + 'account': { + 'id': 4010, + 'name': 'CREDO Mobile', + 'handle': 'CREDO', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/999271_10151925847135968_1940376022_n.png?_nc_cat=1&_nc_oc=AQmkEi4kvJX6wR9R0wgtJW7cJsK92vaVeoDNi3KsZ73HZ11pK-zDJUhBvAU8294ZxOM&_nc_ht=scontent.xx&oh=c3f09575f02dbb46fc74b68e6b9ac627&oe=5DFCD329', + 'subscriberCount': 686618, + 'url': 'https://www.facebook.com/6851405967', + 'platform': 'Facebook', + 'platformId': '6851405967', + 'verified': True + }, + 'summary': { + 'loveCount': 2464, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 25985, + 'wowCount': 838, + 'thankfulCount': 0, + 'interactionRate': 0.172005112090969, + 'likeCount': 10372, + 'hahaCount': 414, + 'commentCount': 1631, + 'shareCount': 6526, + 'sadCount': 530, + 'angryCount': 3210, + 'totalVideoTimeMS': 184617, + 'postCount': 22 + }, + 'breakdown': { + 'link': { + 'shareCount': 3818, + 'loveCount': 1618, + 'totalInteractionCount': 14259, + 'wowCount': 714, + 'sadCount': 450, + 'angryCount': 2101, + 'thankfulCount': 0, + 'postCount': 14, + 'interactionRate': 0.14826520246283356, + 'likeCount': 4354, + 'hahaCount': 136, + 'commentCount': 1068 + }, + 'photo': { + 'shareCount': 2434, + 'loveCount': 440, + 'totalInteractionCount': 10211, + 'wowCount': 114, + 'sadCount': 80, + 'angryCount': 1109, + 'thankfulCount': 0, + 'postCount': 6, + 'interactionRate': 0.24773979311324157, + 'likeCount': 5257, + 'hahaCount': 278, + 'commentCount': 499 + }, + 'share': { + 'loveCount': 149, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 576, + 'wowCount': 6, + 'thankfulCount': 0, + 'interactionRate': 0.0838907235939019, + 'likeCount': 304, + 'hahaCount': 0, + 'commentCount': 29, + 'shareCount': 88, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 184617, + 'postCount': 1 + }, + 'video': { + 'shareCount': 186, + 'loveCount': 257, + 'totalInteractionCount': 939, + 'wowCount': 4, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.13675935669214215, + 'likeCount': 457, + 'hahaCount': 0, + 'commentCount': 35 + } + }, + 'subscriberData': { + 'initialCount': 686597, + 'finalCount': 686618 + } + }, { + 'account': { + 'id': 10247, + 'name': 'NowThis', + 'handle': 'NowThisNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28379313_1840609126029203_6405012222846484702_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkFdmIYy2uPLXX0xb7b7uQjQ-yiayvSBaPWqSlby_pCoW_1_Iybmu7xSmUb-UMr1gc&_nc_ht=scontent.xx&oh=add01854d7218f79e9aad6351846e535&oe=5E0CA890', + 'subscriberCount': 14558656, + 'url': 'https://www.facebook.com/341163402640457', + 'platform': 'Facebook', + 'platformId': '341163402640457', + 'verified': True + }, + 'summary': { + 'loveCount': 1378, + 'threePlusMinuteVideoCount': 11, + 'totalInteractionCount': 25669, + 'wowCount': 678, + 'thankfulCount': 0, + 'interactionRate': 0.009795233258952069, + 'likeCount': 7300, + 'hahaCount': 1634, + 'commentCount': 3654, + 'shareCount': 5764, + 'sadCount': 1389, + 'angryCount': 3872, + 'totalVideoTimeMS': 3773150, + 'postCount': 18 + }, + 'breakdown': { + 'native_video': { + 
'loveCount': 498, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 4671, + 'wowCount': 214, + 'thankfulCount': 0, + 'interactionRate': 0.008016155128469189, + 'likeCount': 2036, + 'hahaCount': 63, + 'commentCount': 198, + 'shareCount': 1121, + 'sadCount': 495, + 'angryCount': 46, + 'totalVideoTimeMS': 760141, + 'postCount': 4 + }, + 'owned_video': { + 'loveCount': 1209, + 'threePlusMinuteVideoCount': 7, + 'totalInteractionCount': 12700, + 'wowCount': 353, + 'thankfulCount': 0, + 'interactionRate': 0.007926857770568506, + 'likeCount': 5548, + 'hahaCount': 405, + 'commentCount': 1742, + 'shareCount': 2653, + 'sadCount': 542, + 'angryCount': 248, + 'totalVideoTimeMS': 1839681, + 'postCount': 11 + }, + 'crosspost': { + 'loveCount': 711, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 8029, + 'wowCount': 139, + 'thankfulCount': 0, + 'interactionRate': 0.00787877457785275, + 'likeCount': 3512, + 'hahaCount': 342, + 'commentCount': 1544, + 'shareCount': 1532, + 'sadCount': 47, + 'angryCount': 202, + 'totalVideoTimeMS': 1079540, + 'postCount': 7 + }, + 'share': { + 'loveCount': 169, + 'threePlusMinuteVideoCount': 4, + 'totalInteractionCount': 12969, + 'wowCount': 325, + 'thankfulCount': 0, + 'interactionRate': 0.01272143898708221, + 'likeCount': 1752, + 'hahaCount': 1229, + 'commentCount': 1912, + 'shareCount': 3111, + 'sadCount': 847, + 'angryCount': 3624, + 'totalVideoTimeMS': 1933469, + 'postCount': 7 + } + }, + 'subscriberData': { + 'initialCount': 14557547, + 'finalCount': 14558656 + } + }, { + 'account': { + 'id': 17943, + 'name': 'Bill Maher', + 'handle': 'Maher', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/48420187_10156168579487297_4906298196289912832_n.jpg?_nc_cat=1&_nc_oc=AQlC_apxui0Ni6EzTgjAOj02RffSNKA6MYVKQtmsFrJCXWFg9cA4llD0r9MEzEWZUvY&_nc_ht=scontent.xx&oh=f92277539c631c4b38ee16136d790a4f&oe=5DF40276', + 'subscriberCount': 3966346, + 'url': 'https://www.facebook.com/62507427296', + 'platform': 'Facebook', + 'platformId': '62507427296', + 'verified': True + }, + 'summary': { + 'loveCount': 1611, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 25278, + 'wowCount': 85, + 'thankfulCount': 0, + 'interactionRate': 0.3186502279890824, + 'likeCount': 12339, + 'hahaCount': 3218, + 'commentCount': 1499, + 'shareCount': 6434, + 'sadCount': 43, + 'angryCount': 49, + 'totalVideoTimeMS': 827303, + 'postCount': 2 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 1611, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 25278, + 'wowCount': 85, + 'thankfulCount': 0, + 'interactionRate': 0.3186502279890824, + 'likeCount': 12339, + 'hahaCount': 3218, + 'commentCount': 1499, + 'shareCount': 6434, + 'sadCount': 43, + 'angryCount': 49, + 'totalVideoTimeMS': 827303, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 1611, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 25278, + 'wowCount': 85, + 'thankfulCount': 0, + 'interactionRate': 0.3186502279890824, + 'likeCount': 12339, + 'hahaCount': 3218, + 'commentCount': 1499, + 'shareCount': 6434, + 'sadCount': 43, + 'angryCount': 49, + 'totalVideoTimeMS': 827303, + 'postCount': 2 + } + }, + 'subscriberData': { + 'initialCount': 3966490, + 'finalCount': 3966346 + } + }, { + 'account': { + 'id': 30245, + 'name': 'IJR Red', + 'handle': 'IJRRed', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/23376285_10156265164197971_2450414612163288246_n.jpg?_nc_cat=1&_nc_oc=AQm4KDy-Qmj38dJbaAQ0KXPVdY94zu7JBQAIUkAO2_W0uRWIl-5aI18nffFvxZoVICg&_nc_ht=scontent.xx&oh=ab7b4676afa9874079a36c20150411f5&oe=5E0C3B40', + 'subscriberCount': 8531658, + 'url': 'https://www.facebook.com/189885532970', + 'platform': 'Facebook', + 'platformId': '189885532970', + 'verified': True + }, + 'summary': { + 'loveCount': 407, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 23146, + 'wowCount': 560, + 'thankfulCount': 0, + 'interactionRate': 0.03390793789748867, + 'likeCount': 6042, + 'hahaCount': 3595, + 'commentCount': 4576, + 'shareCount': 4828, + 'sadCount': 147, + 'angryCount': 2991, + 'totalVideoTimeMS': 370215, + 'postCount': 8 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 286, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 5441, + 'wowCount': 17, + 'thankfulCount': 0, + 'interactionRate': 0.03188025962017601, + 'likeCount': 2375, + 'hahaCount': 317, + 'commentCount': 1095, + 'shareCount': 673, + 'sadCount': 19, + 'angryCount': 659, + 'totalVideoTimeMS': 370215, + 'postCount': 2 + }, + 'crosspost': { + 'loveCount': 286, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 5441, + 'wowCount': 17, + 'thankfulCount': 0, + 'interactionRate': 0.03188025962017601, + 'likeCount': 2375, + 'hahaCount': 317, + 'commentCount': 1095, + 'shareCount': 673, + 'sadCount': 19, + 'angryCount': 659, + 'totalVideoTimeMS': 370215, + 'postCount': 2 + }, + 'link': { + 'shareCount': 4155, + 'loveCount': 121, + 'totalInteractionCount': 17705, + 'wowCount': 543, + 'sadCount': 128, + 'angryCount': 2332, + 'thankfulCount': 0, + 'postCount': 6, + 'interactionRate': 0.034576016867470305, + 'likeCount': 3667, + 'hahaCount': 3278, + 'commentCount': 3481 + } + }, + 'subscriberData': { + 'initialCount': 8532193, + 'finalCount': 8531658 + } + }, { + 'account': { + 'id': 19471, + 'name': 'CNN Politics', + 'handle': 'cnnpolitics', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22450067_1835100979865060_6024097554775073207_n.png?_nc_cat=1&_nc_oc=AQmpWGKTrzg30Lmmy5ncZ5txlFyDirtObkp2leejFgez6t02RAflIlctecGiymX0NU8&_nc_ht=scontent.xx&oh=bbc41bdb10ef689246595025fc23b309&oe=5E070315', + 'subscriberCount': 2855693, + 'url': 'https://www.facebook.com/219367258105115', + 'platform': 'Facebook', + 'platformId': '219367258105115', + 'verified': True + }, + 'summary': { + 'shareCount': 3683, + 'loveCount': 1448, + 'totalInteractionCount': 21344, + 'wowCount': 476, + 'sadCount': 125, + 'angryCount': 1501, + 'thankfulCount': 0, + 'postCount': 14, + 'interactionRate': 0.05336895933155728, + 'likeCount': 7316, + 'hahaCount': 2685, + 'commentCount': 4110 + }, + 'breakdown': { + 'link': { + 'shareCount': 3683, + 'loveCount': 1448, + 'totalInteractionCount': 21344, + 'wowCount': 476, + 'sadCount': 125, + 'angryCount': 1501, + 'thankfulCount': 0, + 'postCount': 14, + 'interactionRate': 0.05336895933155728, + 'likeCount': 7316, + 'hahaCount': 2685, + 'commentCount': 4110 + } + }, + 'subscriberData': { + 'initialCount': 2855492, + 'finalCount': 2855693 + } + }, { + 'account': { + 'id': 6786, + 'name': 'The Young Turks', + 'handle': 'TheYoungTurks', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5', + 'subscriberCount': 2100186, + 'url': 
'https://www.facebook.com/210277954204', + 'platform': 'Facebook', + 'platformId': '210277954204', + 'verified': True + }, + 'summary': { + 'loveCount': 751, + 'threePlusMinuteVideoCount': 22, + 'totalInteractionCount': 20554, + 'wowCount': 589, + 'thankfulCount': 0, + 'interactionRate': 0.02571346533229654, + 'likeCount': 6504, + 'hahaCount': 1670, + 'commentCount': 2256, + 'shareCount': 5521, + 'sadCount': 420, + 'angryCount': 2843, + 'totalVideoTimeMS': 16356669, + 'postCount': 38 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 77, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 2183, + 'wowCount': 48, + 'thankfulCount': 0, + 'interactionRate': 0.020761242379409798, + 'likeCount': 890, + 'hahaCount': 359, + 'commentCount': 306, + 'shareCount': 439, + 'sadCount': 24, + 'angryCount': 40, + 'totalVideoTimeMS': 2126332, + 'postCount': 5 + }, + 'owned_video': { + 'loveCount': 459, + 'threePlusMinuteVideoCount': 21, + 'totalInteractionCount': 16329, + 'wowCount': 488, + 'thankfulCount': 0, + 'interactionRate': 0.03533220606771117, + 'likeCount': 4873, + 'hahaCount': 1162, + 'commentCount': 1770, + 'shareCount': 4805, + 'sadCount': 388, + 'angryCount': 2384, + 'totalVideoTimeMS': 9264722, + 'postCount': 22 + }, + 'crosspost': { + 'loveCount': 382, + 'threePlusMinuteVideoCount': 16, + 'totalInteractionCount': 14146, + 'wowCount': 440, + 'thankfulCount': 0, + 'interactionRate': 0.039617783623093934, + 'likeCount': 3983, + 'hahaCount': 803, + 'commentCount': 1464, + 'shareCount': 4366, + 'sadCount': 364, + 'angryCount': 2344, + 'totalVideoTimeMS': 7138390, + 'postCount': 17 + }, + 'share': { + 'loveCount': 27, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 323, + 'wowCount': 2, + 'thankfulCount': 0, + 'interactionRate': 0.015380461670984783, + 'likeCount': 179, + 'hahaCount': 9, + 'commentCount': 23, + 'shareCount': 80, + 'sadCount': 0, + 'angryCount': 3, + 'totalVideoTimeMS': 7091947, + 'postCount': 1 + }, + 'video': { + 'shareCount': 636, + 'loveCount': 265, + 'totalInteractionCount': 3902, + 'wowCount': 99, + 'sadCount': 32, + 'angryCount': 456, + 'thankfulCount': 0, + 'postCount': 15, + 'interactionRate': 0.012380557382216852, + 'likeCount': 1452, + 'hahaCount': 499, + 'commentCount': 463 + } + }, + 'subscriberData': { + 'initialCount': 2099948, + 'finalCount': 2100186 + } + }, { + 'account': { + 'id': 650861, + 'name': 'Daily Wire', + 'handle': 'DailyWire', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27655057_1815794295383060_2228253987427136016_n.png?_nc_cat=1&_nc_oc=AQm_uPD8ZwlgfmUIjiJBxewrWpNXIPkUpDdGdWdkYu9LXrRzIuUYx8pGdp5Kmcz1HU8&_nc_ht=scontent.xx&oh=ab8e2768dce63a6200349ce2d7dc8a11&oe=5DF6BB9F', + 'subscriberCount': 1934601, + 'url': 'https://www.facebook.com/1435071773455316', + 'platform': 'Facebook', + 'platformId': '1435071773455316', + 'verified': True + }, + 'summary': { + 'loveCount': 320, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 20071, + 'wowCount': 506, + 'thankfulCount': 0, + 'interactionRate': 0.01364644339569, + 'likeCount': 6269, + 'hahaCount': 3786, + 'commentCount': 4376, + 'shareCount': 2066, + 'sadCount': 427, + 'angryCount': 2321, + 'totalVideoTimeMS': 628754, + 'postCount': 76 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 28, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 1360, + 'wowCount': 41, + 'thankfulCount': 0, + 'interactionRate': 0.03514992995859545, + 'likeCount': 766, + 'hahaCount': 78, + 'commentCount': 125, + 'shareCount': 272, + 'sadCount': 15, + 
'angryCount': 35, + 'totalVideoTimeMS': 628754, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 28, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 1360, + 'wowCount': 41, + 'thankfulCount': 0, + 'interactionRate': 0.03514992995859545, + 'likeCount': 766, + 'hahaCount': 78, + 'commentCount': 125, + 'shareCount': 272, + 'sadCount': 15, + 'angryCount': 35, + 'totalVideoTimeMS': 628754, + 'postCount': 2 + }, + 'link': { + 'shareCount': 1794, + 'loveCount': 292, + 'totalInteractionCount': 18711, + 'wowCount': 465, + 'sadCount': 412, + 'angryCount': 2286, + 'thankfulCount': 0, + 'postCount': 74, + 'interactionRate': 0.013026150514067726, + 'likeCount': 5503, + 'hahaCount': 3708, + 'commentCount': 4251 + } + }, + 'subscriberData': { + 'initialCount': 1934539, + 'finalCount': 1934601 + } + }, { + 'account': { + 'id': 18752, + 'name': 'New York Daily News', + 'handle': 'NYDailyNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1', + 'subscriberCount': 3120017, + 'url': 'https://www.facebook.com/268914272540', + 'platform': 'Facebook', + 'platformId': '268914272540', + 'verified': True + }, + 'summary': { + 'shareCount': 4828, + 'loveCount': 287, + 'totalInteractionCount': 20002, + 'wowCount': 1060, + 'sadCount': 2842, + 'angryCount': 2438, + 'thankfulCount': 0, + 'postCount': 61, + 'interactionRate': 0.010481274817903877, + 'likeCount': 4230, + 'hahaCount': 1160, + 'commentCount': 3157 + }, + 'breakdown': { + 'link': { + 'shareCount': 4624, + 'loveCount': 284, + 'totalInteractionCount': 19262, + 'wowCount': 1040, + 'sadCount': 2833, + 'angryCount': 2400, + 'thankfulCount': 0, + 'postCount': 59, + 'interactionRate': 0.010449221989714567, + 'likeCount': 3961, + 'hahaCount': 1058, + 'commentCount': 3062 + }, + 'photo': { + 'shareCount': 204, + 'loveCount': 3, + 'totalInteractionCount': 740, + 'wowCount': 20, + 'sadCount': 9, + 'angryCount': 38, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.011859546430044143, + 'likeCount': 269, + 'hahaCount': 102, + 'commentCount': 95 + } + }, + 'subscriberData': { + 'initialCount': 3119682, + 'finalCount': 3120017 + } + }, { + 'account': { + 'id': 6648, + 'name': 'Business Insider', + 'handle': 'businessinsider', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9', + 'subscriberCount': 9107575, + 'url': 'https://www.facebook.com/20446254070', + 'platform': 'Facebook', + 'platformId': '20446254070', + 'verified': True + }, + 'summary': { + 'loveCount': 698, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 19946, + 'wowCount': 577, + 'thankfulCount': 0, + 'interactionRate': 0.0010211595794074279, + 'likeCount': 8305, + 'hahaCount': 1471, + 'commentCount': 3626, + 'shareCount': 4146, + 'sadCount': 379, + 'angryCount': 744, + 'totalVideoTimeMS': 1519862, + 'postCount': 213 + }, + 'breakdown': { + 'link': { + 'shareCount': 3877, + 'loveCount': 658, + 'totalInteractionCount': 19022, + 'wowCount': 538, + 'sadCount': 379, + 'angryCount': 744, + 'thankfulCount': 0, + 'postCount': 208, + 'interactionRate': 0.0009991991583449024, + 'likeCount': 7804, + 'hahaCount': 1471, + 
'commentCount': 3551 + }, + 'share': { + 'loveCount': 40, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 924, + 'wowCount': 39, + 'thankfulCount': 0, + 'interactionRate': 0.00202035873775233, + 'likeCount': 501, + 'hahaCount': 0, + 'commentCount': 75, + 'shareCount': 269, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 1519862, + 'postCount': 5 + } + }, + 'subscriberData': { + 'initialCount': 9107012, + 'finalCount': 9107575 + } + }, { + 'account': { + 'id': 379565, + 'name': 'Chicks On The Right', + 'handle': 'ChicksOnTheRight', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64300377_10161966021395494_5710723125331623936_n.jpg?_nc_cat=1&_nc_oc=AQnx_5K6idsI6rYjNhjov9DcFdSjNH3jYlaOOOdv1Kp4cYJLSRoNvw8yqjeqQEzQtfE&_nc_ht=scontent.xx&oh=31f36fd2da3350e594e6e30d67309d20&oe=5DF779FF', + 'subscriberCount': 1257694, + 'url': 'https://www.facebook.com/195530355493', + 'platform': 'Facebook', + 'platformId': '195530355493', + 'verified': True + }, + 'summary': { + 'shareCount': 2639, + 'loveCount': 434, + 'totalInteractionCount': 19527, + 'wowCount': 368, + 'sadCount': 618, + 'angryCount': 1640, + 'thankfulCount': 0, + 'postCount': 21, + 'interactionRate': 0.07386425753103353, + 'likeCount': 6224, + 'hahaCount': 3694, + 'commentCount': 3910 + }, + 'breakdown': { + 'link': { + 'shareCount': 352, + 'loveCount': 150, + 'totalInteractionCount': 3459, + 'wowCount': 143, + 'sadCount': 192, + 'angryCount': 507, + 'thankfulCount': 0, + 'postCount': 13, + 'interactionRate': 0.02114950753848753, + 'likeCount': 1110, + 'hahaCount': 453, + 'commentCount': 552 + }, + 'photo': { + 'shareCount': 2287, + 'loveCount': 284, + 'totalInteractionCount': 16068, + 'wowCount': 225, + 'sadCount': 426, + 'angryCount': 1133, + 'thankfulCount': 0, + 'postCount': 8, + 'interactionRate': 0.15965492908752993, + 'likeCount': 5114, + 'hahaCount': 3241, + 'commentCount': 3358 + } + }, + 'subscriberData': { + 'initialCount': 1257731, + 'finalCount': 1257694 + } + }, { + 'account': { + 'id': 7777, + 'name': 'PBS NewsHour', + 'handle': 'newshour', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c2.0.200.200a/p200x200/303161_10150312469923675_881915800_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlncoeS4CvKUmO2uTUydTKWAioHD0iWx6bl9DqkBkwnCZgpb6CCkyZj7aidr38Ug1k&_nc_ht=scontent.xx&oh=0d6d1417f6b982eac877d479f2404a37&oe=5E0E2C5A', + 'subscriberCount': 1417219, + 'url': 'https://www.facebook.com/6491828674', + 'platform': 'Facebook', + 'platformId': '6491828674', + 'verified': True + }, + 'summary': { + 'shareCount': 4169, + 'loveCount': 214, + 'totalInteractionCount': 18916, + 'wowCount': 579, + 'sadCount': 3167, + 'angryCount': 2882, + 'thankfulCount': 0, + 'postCount': 21, + 'interactionRate': 0.06350568305301454, + 'likeCount': 5238, + 'hahaCount': 693, + 'commentCount': 1974 + }, + 'breakdown': { + 'link': { + 'shareCount': 4169, + 'loveCount': 214, + 'totalInteractionCount': 18916, + 'wowCount': 579, + 'sadCount': 3167, + 'angryCount': 2882, + 'thankfulCount': 0, + 'postCount': 21, + 'interactionRate': 0.06350568305301454, + 'likeCount': 5238, + 'hahaCount': 693, + 'commentCount': 1974 + } + }, + 'subscriberData': { + 'initialCount': 1417173, + 'finalCount': 1417219 + } + }, { + 'account': { + 'id': 2392855, + 'name': 'Alexandria Ocasio-Cortez', + 'handle': 'OcasioCortez', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22730535_1481259015298111_3425440004620635228_n.jpg?_nc_cat=1&_nc_oc=AQmDWxgsbW3C7swpQPubkVVpHgc5Qco0_dIf4b9uxBEeL7goF8cpcBerkfe1h4kNuyA&_nc_ht=scontent.xx&oh=22b0c36140209b2ef9d9e8033f843cf8&oe=5E05DF27', + 'subscriberCount': 959516, + 'url': 'https://www.facebook.com/1316372698453411', + 'platform': 'Facebook', + 'platformId': '1316372698453411', + 'verified': True + }, + 'summary': { + 'shareCount': 1550, + 'loveCount': 476, + 'totalInteractionCount': 18378, + 'wowCount': 324, + 'sadCount': 85, + 'angryCount': 2460, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.9578215579167252, + 'likeCount': 11326, + 'hahaCount': 479, + 'commentCount': 1678 + }, + 'breakdown': { + 'link': { + 'shareCount': 1550, + 'loveCount': 476, + 'totalInteractionCount': 18378, + 'wowCount': 324, + 'sadCount': 85, + 'angryCount': 2460, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.9578215579167252, + 'likeCount': 11326, + 'hahaCount': 479, + 'commentCount': 1678 + } + }, + 'subscriberData': { + 'initialCount': 959213, + 'finalCount': 959516 + } + }, { + 'account': { + 'id': 6892, + 'name': 'TheBlaze', + 'handle': 'TheBlaze', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/47350623_2141870595850269_7864140219111440384_n.png?_nc_cat=1&_nc_oc=AQmGyVQswjmmaInAkgMKbLJ62jAcb2BShbL78435-MqCEBLedhKr7VO97Nzxt2x220k&_nc_ht=scontent.xx&oh=4a5ce0b44b6400aab9bb78aa2afdee87&oe=5E011864', + 'subscriberCount': 2089166, + 'url': 'https://www.facebook.com/140738092630206', + 'platform': 'Facebook', + 'platformId': '140738092630206', + 'verified': True + }, + 'summary': { + 'loveCount': 444, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 17210, + 'wowCount': 231, + 'thankfulCount': 0, + 'interactionRate': 0.03743126731405527, + 'likeCount': 4069, + 'hahaCount': 3750, + 'commentCount': 3896, + 'shareCount': 2670, + 'sadCount': 420, + 'angryCount': 1730, + 'totalVideoTimeMS': 1284363, + 'postCount': 22 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 10, + 'totalInteractionCount': 548, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.026230606762279146, + 'likeCount': 265, + 'hahaCount': 0, + 'commentCount': 20, + 'shareCount': 242, + 'sadCount': 1, + 'angryCount': 6, + 'totalVideoTimeMS': 176169, + 'postCount': 1 + }, + 'crosspost': { + 'loveCount': 10, + 'totalInteractionCount': 548, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.026230606762279146, + 'likeCount': 265, + 'hahaCount': 0, + 'commentCount': 20, + 'shareCount': 242, + 'sadCount': 1, + 'angryCount': 6, + 'totalVideoTimeMS': 176169, + 'postCount': 1 + }, + 'link': { + 'shareCount': 2361, + 'loveCount': 400, + 'totalInteractionCount': 16005, + 'wowCount': 224, + 'sadCount': 419, + 'angryCount': 1724, + 'thankfulCount': 0, + 'postCount': 17, + 'interactionRate': 0.045041972560774954, + 'likeCount': 3295, + 'hahaCount': 3732, + 'commentCount': 3850 + }, + 'share': { + 'loveCount': 34, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 657, + 'wowCount': 3, + 'thankfulCount': 0, + 'interactionRate': 0.007850035600390107, + 'likeCount': 509, + 'hahaCount': 18, + 'commentCount': 26, + 'shareCount': 67, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 1108194, + 'postCount': 4 + } + }, + 'subscriberData': { + 'initialCount': 2089159, + 'finalCount': 2089166 + } + }, { + 'account': { + 'id': 35336, + 'name': 'CNSNews.com', + 'handle': 'cnsnewscom', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22852132_10154846552035927_4321589519758565624_n.png?_nc_cat=1&_nc_oc=AQnmIVH1IVnR6t9S83Hf7HR7IsOdXqL8cDu8PCeK2_FzNcj6tfQmG0zCCYrZQZkrhjk&_nc_ht=scontent.xx&oh=8c7180ce5469f10781bebacf2fff223c&oe=5E092B8F', + 'subscriberCount': 2316273, + 'url': 'https://www.facebook.com/19420215926', + 'platform': 'Facebook', + 'platformId': '19420215926', + 'verified': True + }, + 'summary': { + 'loveCount': 620, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 15415, + 'wowCount': 235, + 'thankfulCount': 0, + 'interactionRate': 0.06652921899834453, + 'likeCount': 5723, + 'hahaCount': 1227, + 'commentCount': 3450, + 'shareCount': 2027, + 'sadCount': 499, + 'angryCount': 1634, + 'totalVideoTimeMS': 868494, + 'postCount': 10 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 44, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 1955, + 'wowCount': 17, + 'thankfulCount': 0, + 'interactionRate': 0.021068305562097427, + 'likeCount': 572, + 'hahaCount': 402, + 'commentCount': 384, + 'shareCount': 345, + 'sadCount': 14, + 'angryCount': 177, + 'totalVideoTimeMS': 868494, + 'postCount': 4 + }, + 'crosspost': { + 'loveCount': 44, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 1955, + 'wowCount': 17, + 'thankfulCount': 0, + 'interactionRate': 0.021068305562097427, + 'likeCount': 572, + 'hahaCount': 402, + 'commentCount': 384, + 'shareCount': 345, + 'sadCount': 14, + 'angryCount': 177, + 'totalVideoTimeMS': 868494, + 'postCount': 4 + }, + 'link': { + 'shareCount': 1682, + 'loveCount': 576, + 'totalInteractionCount': 13460, + 'wowCount': 218, + 'sadCount': 485, + 'angryCount': 1457, + 'thankfulCount': 0, + 'postCount': 6, + 'interactionRate': 0.09683649462250929, + 'likeCount': 5151, + 'hahaCount': 825, + 'commentCount': 3066 + } + }, + 'subscriberData': { + 'initialCount': 2316278, + 'finalCount': 2316273 + } + }, { + 'account': { + 'id': 13844, + 'name': 'HuffPost Politics', + 'handle': 'HuffPostPolitics', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18838902_10155124699752911_6971495653588629046_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm5cko-OrQpOcPI-GqgP9V74INLYLzur0WIBNnrYmgNA33fLG0VMMxSWpg2i7235p0&_nc_ht=scontent.xx&oh=755100a2afdbaf29d5e08e613e66fc6e&oe=5DF42A6A', + 'subscriberCount': 2107783, + 'url': 'https://www.facebook.com/56845382910', + 'platform': 'Facebook', + 'platformId': '56845382910', + 'verified': True + }, + 'summary': { + 'shareCount': 2778, + 'loveCount': 162, + 'totalInteractionCount': 14995, + 'wowCount': 532, + 'sadCount': 619, + 'angryCount': 2138, + 'thankfulCount': 0, + 'postCount': 18, + 'interactionRate': 0.03951897859807728, + 'likeCount': 2983, + 'hahaCount': 2425, + 'commentCount': 3358 + }, + 'breakdown': { + 'link': { + 'shareCount': 2778, + 'loveCount': 162, + 'totalInteractionCount': 14995, + 'wowCount': 532, + 'sadCount': 619, + 'angryCount': 2138, + 'thankfulCount': 0, + 'postCount': 18, + 'interactionRate': 0.03951897859807728, + 'likeCount': 2983, + 'hahaCount': 2425, + 'commentCount': 3358 + } + }, + 'subscriberData': { + 'initialCount': 2107913, + 'finalCount': 2107783 + } + }, { + 'account': { + 'id': 10275, + 'name': 'POLITICO', + 'handle': 'politico', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12366404_10153208310706680_910786628287677974_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQma-VwdF4pnf7Qd94b720eyuT713ikAnUhD26cnrAU-j-98Nm8RizAuzhqPFP_BlLY&_nc_ht=scontent.xx&oh=706731dfa1a42c518d02de9a35c97541&oe=5E029DCC', + 'subscriberCount': 1879882, + 
'url': 'https://www.facebook.com/62317591679', + 'platform': 'Facebook', + 'platformId': '62317591679', + 'verified': True + }, + 'summary': { + 'shareCount': 1794, + 'loveCount': 132, + 'totalInteractionCount': 14656, + 'wowCount': 358, + 'sadCount': 443, + 'angryCount': 5090, + 'thankfulCount': 0, + 'postCount': 10, + 'interactionRate': 0.0779381968718853, + 'likeCount': 1825, + 'hahaCount': 1630, + 'commentCount': 3384 + }, + 'breakdown': { + 'link': { + 'shareCount': 1794, + 'loveCount': 132, + 'totalInteractionCount': 14656, + 'wowCount': 358, + 'sadCount': 443, + 'angryCount': 5090, + 'thankfulCount': 0, + 'postCount': 10, + 'interactionRate': 0.0779381968718853, + 'likeCount': 1825, + 'hahaCount': 1630, + 'commentCount': 3384 + } + }, + 'subscriberData': { + 'initialCount': 1879507, + 'finalCount': 1879882 + } + }, { + 'account': { + 'id': 10342, + 'name': 'New York Post', + 'handle': 'NYPost', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12932928_10157483552025206_1176575955706691041_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnPmbZuC7S1v1NTPRZ7rWQU4EucwAW3nKx-aXD0PzlPsD3ifQpdaLcXEegH730Wy_o&_nc_ht=scontent.xx&oh=c77d86309611fa2972df1979bf6cab9e&oe=5E0827CA', + 'subscriberCount': 4183079, + 'url': 'https://www.facebook.com/134486075205', + 'platform': 'Facebook', + 'platformId': '134486075205', + 'verified': True + }, + 'summary': { + 'shareCount': 3305, + 'loveCount': 374, + 'totalInteractionCount': 13825, + 'wowCount': 993, + 'sadCount': 538, + 'angryCount': 313, + 'thankfulCount': 0, + 'postCount': 29, + 'interactionRate': 0.011379394140496551, + 'likeCount': 3600, + 'hahaCount': 2201, + 'commentCount': 2501 + }, + 'breakdown': { + 'link': { + 'shareCount': 3305, + 'loveCount': 374, + 'totalInteractionCount': 13825, + 'wowCount': 993, + 'sadCount': 538, + 'angryCount': 313, + 'thankfulCount': 0, + 'postCount': 29, + 'interactionRate': 0.011379394140496551, + 'likeCount': 3600, + 'hahaCount': 2201, + 'commentCount': 2501 + } + }, + 'subscriberData': { + 'initialCount': 4182920, + 'finalCount': 4183079 + } + }, { + 'account': { + 'id': 6646, + 'name': 'VICE', + 'handle': 'VICE', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13427861_1304295039603751_2178102892370936049_n.jpg?_nc_cat=1&_nc_oc=AQmzoEUjC5BCCMVSsIFvWa52KGr3Iqh9f0Y_eezqYMFw7h_EUam7WQdYxEFvJB6LoP0&_nc_ht=scontent.xx&oh=847f8eb6c5132c90382bc0940afbc692&oe=5E02C5BA', + 'subscriberCount': 8177544, + 'url': 'https://www.facebook.com/167115176655082', + 'platform': 'Facebook', + 'platformId': '167115176655082', + 'verified': True + }, + 'summary': { + 'loveCount': 660, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 13003, + 'wowCount': 322, + 'thankfulCount': 0, + 'interactionRate': 0.00360819017583995, + 'likeCount': 5818, + 'hahaCount': 1403, + 'commentCount': 2892, + 'shareCount': 1641, + 'sadCount': 92, + 'angryCount': 175, + 'totalVideoTimeMS': 1654965, + 'postCount': 44 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 116, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 2014, + 'wowCount': 113, + 'thankfulCount': 0, + 'interactionRate': 0.004916923561653084, + 'likeCount': 733, + 'hahaCount': 266, + 'commentCount': 398, + 'shareCount': 339, + 'sadCount': 17, + 'angryCount': 32, + 'totalVideoTimeMS': 1654965, + 'postCount': 5 + }, + 'crosspost': { + 'loveCount': 116, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 2014, + 'wowCount': 113, + 'thankfulCount': 0, + 'interactionRate': 0.004916923561653084, + 'likeCount': 733, + 'hahaCount': 
266, + 'commentCount': 398, + 'shareCount': 339, + 'sadCount': 17, + 'angryCount': 32, + 'totalVideoTimeMS': 1654965, + 'postCount': 5 + }, + 'link': { + 'shareCount': 1191, + 'loveCount': 472, + 'totalInteractionCount': 9861, + 'wowCount': 165, + 'sadCount': 65, + 'angryCount': 141, + 'thankfulCount': 0, + 'postCount': 36, + 'interactionRate': 0.003339104806794259, + 'likeCount': 4538, + 'hahaCount': 1021, + 'commentCount': 2268 + }, + 'photo': { + 'shareCount': 111, + 'loveCount': 72, + 'totalInteractionCount': 1128, + 'wowCount': 44, + 'sadCount': 10, + 'angryCount': 2, + 'thankfulCount': 0, + 'postCount': 3, + 'interactionRate': 0.0045989135800536315, + 'likeCount': 547, + 'hahaCount': 116, + 'commentCount': 226 + } + }, + 'subscriberData': { + 'initialCount': 8174144, + 'finalCount': 8177544 + } + }, { + 'account': { + 'id': 13991, + 'name': 'Washington Examiner', + 'handle': 'WashingtonExaminer', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/36928610_10156017618514160_6905131952433528832_n.jpg?_nc_cat=111&_nc_oc=AQnKuEJBvxlMgc-zQHzSfEtsgFfHehn1pucacRbqrYlmmQp69EGwogOuyEUo-OV8OWM&_nc_ht=scontent.xx&oh=88b1063a5362110cc87fb9d6caedea35&oe=5DFE6885', + 'subscriberCount': 714626, + 'url': 'https://www.facebook.com/40656699159', + 'platform': 'Facebook', + 'platformId': '40656699159', + 'verified': True + }, + 'summary': { + 'shareCount': 1899, + 'loveCount': 295, + 'totalInteractionCount': 11994, + 'wowCount': 117, + 'sadCount': 75, + 'angryCount': 1175, + 'thankfulCount': 0, + 'postCount': 20, + 'interactionRate': 0.08381942301731732, + 'likeCount': 3007, + 'hahaCount': 1864, + 'commentCount': 3562 + }, + 'breakdown': { + 'link': { + 'shareCount': 1899, + 'loveCount': 295, + 'totalInteractionCount': 11994, + 'wowCount': 117, + 'sadCount': 75, + 'angryCount': 1175, + 'thankfulCount': 0, + 'postCount': 20, + 'interactionRate': 0.08381942301731732, + 'likeCount': 3007, + 'hahaCount': 1864, + 'commentCount': 3562 + } + }, + 'subscriberData': { + 'initialCount': 714637, + 'finalCount': 714626 + } + }, { + 'account': { + 'id': 1431632, + 'name': 'Axios', + 'handle': 'axiosnews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46844445_2289864377926961_9207563348864925696_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQncZ-V-nWa7ihCtPUY2OE7NX8kzbdrK9hiEMhqNa6qBeOKkh3VKYYgS2lvKd-xjZnI&_nc_ht=scontent.xx&oh=3fa348414b7b9cfcabc2cd5bc93789f4&oe=5E0F6422', + 'subscriberCount': 339358, + 'url': 'https://www.facebook.com/1830665590513511', + 'platform': 'Facebook', + 'platformId': '1830665590513511', + 'verified': True + }, + 'summary': { + 'shareCount': 2393, + 'loveCount': 404, + 'totalInteractionCount': 11534, + 'wowCount': 376, + 'sadCount': 820, + 'angryCount': 1586, + 'thankfulCount': 0, + 'postCount': 26, + 'interactionRate': 0.13054426349313464, + 'likeCount': 3017, + 'hahaCount': 847, + 'commentCount': 2091 + }, + 'breakdown': { + 'link': { + 'shareCount': 2393, + 'loveCount': 404, + 'totalInteractionCount': 11534, + 'wowCount': 376, + 'sadCount': 820, + 'angryCount': 1586, + 'thankfulCount': 0, + 'postCount': 26, + 'interactionRate': 0.13054426349313464, + 'likeCount': 3017, + 'hahaCount': 847, + 'commentCount': 2091 + } + }, + 'subscriberData': { + 'initialCount': 339339, + 'finalCount': 339358 + } + }, { + 'account': { + 'id': 44680, + 'name': 'NBC Nightly News with Lester Holt', + 'handle': 'nbcnightlynews', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15267620_10154852089253689_1498794491363762450_n.jpg?_nc_cat=1&_nc_oc=AQl8slY4bj3RknUwuA59x-FHZ63POsUD31th1TnRryuOSYs7o8qHGIfTo5RiNj1hZx4&_nc_ht=scontent.xx&oh=2eaaf6b66b32d649aa32cbc311d57be4&oe=5E129FF0', + 'subscriberCount': 3494646, + 'url': 'https://www.facebook.com/114288853688', + 'platform': 'Facebook', + 'platformId': '114288853688', + 'verified': True + }, + 'summary': { + 'loveCount': 691, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 11384, + 'wowCount': 887, + 'thankfulCount': 0, + 'interactionRate': 0.009843653311681112, + 'likeCount': 3937, + 'hahaCount': 417, + 'commentCount': 1173, + 'shareCount': 1879, + 'sadCount': 1939, + 'angryCount': 461, + 'totalVideoTimeMS': 939583, + 'postCount': 33 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 104, + 'totalInteractionCount': 870, + 'wowCount': 16, + 'thankfulCount': 0, + 'interactionRate': 0.0049790571983503295, + 'likeCount': 488, + 'hahaCount': 7, + 'commentCount': 93, + 'shareCount': 159, + 'sadCount': 2, + 'angryCount': 1, + 'totalVideoTimeMS': 479375, + 'postCount': 5 + }, + 'owned_video': { + 'loveCount': 254, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 1714, + 'wowCount': 32, + 'thankfulCount': 0, + 'interactionRate': 0.005436901538428521, + 'likeCount': 981, + 'hahaCount': 10, + 'commentCount': 127, + 'shareCount': 303, + 'sadCount': 5, + 'angryCount': 2, + 'totalVideoTimeMS': 939583, + 'postCount': 9 + }, + 'crosspost': { + 'loveCount': 150, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 844, + 'wowCount': 16, + 'thankfulCount': 0, + 'interactionRate': 0.006037822234781147, + 'likeCount': 493, + 'hahaCount': 3, + 'commentCount': 34, + 'shareCount': 144, + 'sadCount': 3, + 'angryCount': 1, + 'totalVideoTimeMS': 460208, + 'postCount': 4 + }, + 'link': { + 'shareCount': 1576, + 'loveCount': 437, + 'totalInteractionCount': 9670, + 'wowCount': 855, + 'sadCount': 1934, + 'angryCount': 459, + 'thankfulCount': 0, + 'postCount': 24, + 'interactionRate': 0.011503339044464555, + 'likeCount': 2956, + 'hahaCount': 407, + 'commentCount': 1046 + } + }, + 'subscriberData': { + 'initialCount': 3494629, + 'finalCount': 3494646 + } + }, { + 'account': { + 'id': 10343, + 'name': 'Bloomberg', + 'handle': 'bloombergbusiness', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31790536_10156383343951880_9143173959372505088_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm0CmNHVi4wKjfV2xKZ8WmMFbjVnwkn6rwlbqPewk5wTL0Plzu-cY8b0zLOAhS4DLw&_nc_ht=scontent.xx&oh=6eda22b5a7936ec78ea6929b3ed38430&oe=5E1356BD', + 'subscriberCount': 2955809, + 'url': 'https://www.facebook.com/266790296879', + 'platform': 'Facebook', + 'platformId': '266790296879', + 'verified': True + }, + 'summary': { + 'loveCount': 284, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 11343, + 'wowCount': 436, + 'thankfulCount': 0, + 'interactionRate': 0.008526067860395113, + 'likeCount': 4732, + 'hahaCount': 812, + 'commentCount': 1565, + 'shareCount': 2536, + 'sadCount': 711, + 'angryCount': 267, + 'totalVideoTimeMS': 645754, + 'postCount': 45 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 7, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 284, + 'wowCount': 8, + 'thankfulCount': 0, + 'interactionRate': 0.0031803586463378594, + 'likeCount': 186, + 'hahaCount': 2, + 'commentCount': 16, + 'shareCount': 65, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 645754, + 'postCount': 3 + }, + 'crosspost': { + 'loveCount': 7, + 
'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 284, + 'wowCount': 8, + 'thankfulCount': 0, + 'interactionRate': 0.0031803586463378594, + 'likeCount': 186, + 'hahaCount': 2, + 'commentCount': 16, + 'shareCount': 65, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 645754, + 'postCount': 3 + }, + 'link': { + 'shareCount': 2304, + 'loveCount': 260, + 'totalInteractionCount': 10139, + 'wowCount': 401, + 'sadCount': 581, + 'angryCount': 238, + 'thankfulCount': 0, + 'postCount': 40, + 'interactionRate': 0.008559901463015728, + 'likeCount': 4109, + 'hahaCount': 793, + 'commentCount': 1453 + }, + 'photo': { + 'shareCount': 167, + 'loveCount': 17, + 'totalInteractionCount': 920, + 'wowCount': 27, + 'sadCount': 130, + 'angryCount': 29, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.01556345720548314, + 'likeCount': 437, + 'hahaCount': 17, + 'commentCount': 96 + } + }, + 'subscriberData': { + 'initialCount': 2955474, + 'finalCount': 2955809 + } + }, { + 'account': { + 'id': 48738, + 'name': 'MRCTV', + 'handle': 'mrctv', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22788750_1672001339489574_3161704397036092004_n.png?_nc_cat=1&_nc_oc=AQnzYjYzWh8Ym4hvIw-6PRgWMQ8rhpJPlJayuXuhwN8Mr623OiX9U49qXT5nl-JMBTI&_nc_ht=scontent.xx&oh=d2d096127dd32ca06b4a0144b43f5673&oe=5E1233A4', + 'subscriberCount': 3452690, + 'url': 'https://www.facebook.com/201956993160690', + 'platform': 'Facebook', + 'platformId': '201956993160690', + 'verified': True + }, + 'summary': { + 'loveCount': 96, + 'totalInteractionCount': 11058, + 'wowCount': 156, + 'thankfulCount': 0, + 'interactionRate': 0.02667444405795624, + 'likeCount': 1653, + 'hahaCount': 3724, + 'commentCount': 2709, + 'shareCount': 1490, + 'sadCount': 131, + 'angryCount': 1099, + 'totalVideoTimeMS': 55337, + 'postCount': 12 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 3, + 'totalInteractionCount': 1529, + 'wowCount': 13, + 'thankfulCount': 0, + 'interactionRate': 0.04428363188340401, + 'likeCount': 231, + 'hahaCount': 455, + 'commentCount': 139, + 'shareCount': 517, + 'sadCount': 7, + 'angryCount': 164, + 'totalVideoTimeMS': 55337, + 'postCount': 1 + }, + 'crosspost': { + 'loveCount': 3, + 'totalInteractionCount': 1529, + 'wowCount': 13, + 'thankfulCount': 0, + 'interactionRate': 0.04428363188340401, + 'likeCount': 231, + 'hahaCount': 455, + 'commentCount': 139, + 'shareCount': 517, + 'sadCount': 7, + 'angryCount': 164, + 'totalVideoTimeMS': 55337, + 'postCount': 1 + }, + 'link': { + 'shareCount': 973, + 'loveCount': 93, + 'totalInteractionCount': 9529, + 'wowCount': 143, + 'sadCount': 124, + 'angryCount': 935, + 'thankfulCount': 0, + 'postCount': 11, + 'interactionRate': 0.02508150765927264, + 'likeCount': 1422, + 'hahaCount': 3269, + 'commentCount': 2570 + } + }, + 'subscriberData': { + 'initialCount': 3452796, + 'finalCount': 3452690 + } + }, { + 'account': { + 'id': 10323, + 'name': 'Reuters', + 'handle': 'Reuters', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/51325614_2292147310805612_3874403780548100096_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlLN3v5RKOKT6LVQj--bvulAczkWupv1AuwaG14c3MkOAyF9oLoLGad6n1Rl6FhN6k&_nc_ht=scontent.xx&oh=73deaf953fbb14e82a9c92b2f850db23&oe=5E0ACADC', + 'subscriberCount': 4154522, + 'url': 'https://www.facebook.com/114050161948682', + 'platform': 'Facebook', + 'platformId': '114050161948682', + 'verified': True + }, + 'summary': { + 'shareCount': 1261, + 'loveCount': 737, + 'totalInteractionCount': 10822, + 'wowCount': 338, + 'sadCount': 383, + 'angryCount': 
226, + 'thankfulCount': 0, + 'postCount': 65, + 'interactionRate': 0.003995766413272492, + 'likeCount': 5427, + 'hahaCount': 561, + 'commentCount': 1889 + }, + 'breakdown': { + 'native_video': { + 'shareCount': 517, + 'loveCount': 257, + 'totalInteractionCount': 3437, + 'wowCount': 136, + 'sadCount': 69, + 'angryCount': 47, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.041353775289169524, + 'likeCount': 1610, + 'hahaCount': 22, + 'commentCount': 779 + }, + 'owned_video': { + 'shareCount': 517, + 'loveCount': 257, + 'totalInteractionCount': 3437, + 'wowCount': 136, + 'sadCount': 69, + 'angryCount': 47, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.041353775289169524, + 'likeCount': 1610, + 'hahaCount': 22, + 'commentCount': 779 + }, + 'link': { + 'shareCount': 744, + 'loveCount': 480, + 'totalInteractionCount': 7385, + 'wowCount': 202, + 'sadCount': 314, + 'angryCount': 179, + 'thankfulCount': 0, + 'postCount': 63, + 'interactionRate': 0.0028162931948968765, + 'likeCount': 3817, + 'hahaCount': 539, + 'commentCount': 1110 + } + }, + 'subscriberData': { + 'initialCount': 4154272, + 'finalCount': 4154522 + } + }, { + 'account': { + 'id': 3927, + 'name': 'Daily Kos', + 'handle': 'dailykos', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/69689928_10157802442554255_4623408062214963200_n.jpg?_nc_cat=1&_nc_oc=AQmiFQCe9HEd-7S8-YBXHpZ33b_hLFcVPCNB05lp1enuXRMKyX6Abs1TL5YJfZH8Z8M&_nc_ht=scontent.xx&oh=8492c84250d2d1c80f95be3523063b3b&oe=5DF8500E', + 'subscriberCount': 1321301, + 'url': 'https://www.facebook.com/43179984254', + 'platform': 'Facebook', + 'platformId': '43179984254', + 'verified': True + }, + 'summary': { + 'shareCount': 2484, + 'loveCount': 1084, + 'totalInteractionCount': 10447, + 'wowCount': 361, + 'sadCount': 62, + 'angryCount': 2277, + 'thankfulCount': 0, + 'postCount': 12, + 'interactionRate': 0.06584419447196362, + 'likeCount': 3305, + 'hahaCount': 203, + 'commentCount': 671 + }, + 'breakdown': { + 'link': { + 'shareCount': 2484, + 'loveCount': 1084, + 'totalInteractionCount': 10447, + 'wowCount': 361, + 'sadCount': 62, + 'angryCount': 2277, + 'thankfulCount': 0, + 'postCount': 12, + 'interactionRate': 0.06584419447196362, + 'likeCount': 3305, + 'hahaCount': 203, + 'commentCount': 671 + } + }, + 'subscriberData': { + 'initialCount': 1321301, + 'finalCount': 1321301 + } + }, { + 'account': { + 'id': 7202, + 'name': 'Washington Free Beacon', + 'handle': 'FreeBeacon', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/24176871_1495617353820616_2845320948710215498_n.png?_nc_cat=106&_nc_oc=AQnbg3-OcKtFvYeoc0i6GQ70lXyqvHbqPfUtkh_KFVS0k6oj3r-LxtkTIuKuAo2QRgs&_nc_ht=scontent.xx&oh=8831d07df2000939df5c7d0225ca20ff&oe=5DF909CE', + 'subscriberCount': 740989, + 'url': 'https://www.facebook.com/282024895179874', + 'platform': 'Facebook', + 'platformId': '282024895179874', + 'verified': True + }, + 'summary': { + 'shareCount': 2690, + 'loveCount': 10, + 'totalInteractionCount': 10222, + 'wowCount': 90, + 'sadCount': 60, + 'angryCount': 3237, + 'thankfulCount': 0, + 'postCount': 7, + 'interactionRate': 0.19703183683604658, + 'likeCount': 553, + 'hahaCount': 880, + 'commentCount': 2702 + }, + 'breakdown': { + 'link': { + 'shareCount': 2690, + 'loveCount': 10, + 'totalInteractionCount': 10222, + 'wowCount': 90, + 'sadCount': 60, + 'angryCount': 3237, + 'thankfulCount': 0, + 'postCount': 7, + 'interactionRate': 0.19703183683604658, + 'likeCount': 553, + 'hahaCount': 880, + 'commentCount': 2702 + } + }, + 'subscriberData': { + 
'initialCount': 741005, + 'finalCount': 740989 + } + }, { + 'account': { + 'id': 7199, + 'name': 'Sean Hannity', + 'handle': 'SeanHannity', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/10156129_10154243847245389_4273859834170256486_n.jpg?_nc_cat=1&_nc_oc=AQnKVAkiwo1ZI_RMY4AghlSvZHwn_Go__As4BkMehqcDTXHydiCFDIgYZxe2FAsrjWA&_nc_ht=scontent.xx&oh=f15e3e465f2a2934e585dfce76c17331&oe=5E0C747A', + 'subscriberCount': 3206935, + 'url': 'https://www.facebook.com/69813760388', + 'platform': 'Facebook', + 'platformId': '69813760388', + 'verified': True + }, + 'summary': { + 'loveCount': 362, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 9944, + 'wowCount': 202, + 'thankfulCount': 0, + 'interactionRate': 0.028188294062528745, + 'likeCount': 3629, + 'hahaCount': 1594, + 'commentCount': 1923, + 'shareCount': 881, + 'sadCount': 56, + 'angryCount': 1297, + 'totalVideoTimeMS': 1004775, + 'postCount': 11 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 210, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 2456, + 'wowCount': 7, + 'thankfulCount': 0, + 'interactionRate': 0.0765823564353657, + 'likeCount': 1801, + 'hahaCount': 20, + 'commentCount': 172, + 'shareCount': 241, + 'sadCount': 1, + 'angryCount': 4, + 'totalVideoTimeMS': 498857, + 'postCount': 1 + }, + 'owned_video': { + 'loveCount': 221, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 3779, + 'wowCount': 45, + 'thankfulCount': 0, + 'interactionRate': 0.05890230916384602, + 'likeCount': 2420, + 'hahaCount': 334, + 'commentCount': 312, + 'shareCount': 430, + 'sadCount': 11, + 'angryCount': 6, + 'totalVideoTimeMS': 1004775, + 'postCount': 2 + }, + 'crosspost': { + 'loveCount': 11, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 1323, + 'wowCount': 38, + 'thankfulCount': 0, + 'interactionRate': 0.04125344363354594, + 'likeCount': 619, + 'hahaCount': 314, + 'commentCount': 140, + 'shareCount': 189, + 'sadCount': 10, + 'angryCount': 2, + 'totalVideoTimeMS': 505918, + 'postCount': 1 + }, + 'link': { + 'shareCount': 451, + 'loveCount': 141, + 'totalInteractionCount': 6165, + 'wowCount': 157, + 'sadCount': 45, + 'angryCount': 1291, + 'thankfulCount': 0, + 'postCount': 9, + 'interactionRate': 0.02135949273543384, + 'likeCount': 1209, + 'hahaCount': 1260, + 'commentCount': 1611 + } + }, + 'subscriberData': { + 'initialCount': 3207075, + 'finalCount': 3206935 + } + }, { + 'account': { + 'id': 59767, + 'name': 'AJ+', + 'handle': 'ajplusenglish', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/10675686_452503314891181_8657239428083336114_n.png?_nc_cat=1&_nc_oc=AQlFJD0M_GXyy5E5WBzjW8pOgbfwUdlw6gX0sO4_XgBSfMLbj_1QRzoAuC_OSE3H0tM&_nc_ht=scontent.xx&oh=5d0c3ababdcf031637161a928fbc1086&oe=5DCA0751', + 'subscriberCount': 11131099, + 'url': 'https://www.facebook.com/407570359384477', + 'platform': 'Facebook', + 'platformId': '407570359384477', + 'verified': True + }, + 'summary': { + 'loveCount': 173, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 9604, + 'wowCount': 66, + 'thankfulCount': 0, + 'interactionRate': 0.014373987251531032, + 'likeCount': 2120, + 'hahaCount': 121, + 'commentCount': 560, + 'shareCount': 3416, + 'sadCount': 2136, + 'angryCount': 1012, + 'totalVideoTimeMS': 1831347, + 'postCount': 6 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 8, + 'totalInteractionCount': 7586, + 'wowCount': 40, + 'thankfulCount': 0, + 'interactionRate': 0.03407533352816075, + 'likeCount': 1155, + 'hahaCount': 28, + 'commentCount': 343, + 'shareCount': 3052, + 
'sadCount': 2079, + 'angryCount': 881, + 'totalVideoTimeMS': 193318, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 173, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 9604, + 'wowCount': 66, + 'thankfulCount': 0, + 'interactionRate': 0.014373987251531032, + 'likeCount': 2120, + 'hahaCount': 121, + 'commentCount': 560, + 'shareCount': 3416, + 'sadCount': 2136, + 'angryCount': 1012, + 'totalVideoTimeMS': 1831347, + 'postCount': 6 + }, + 'crosspost': { + 'loveCount': 165, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 2018, + 'wowCount': 26, + 'thankfulCount': 0, + 'interactionRate': 0.004527805984232275, + 'likeCount': 965, + 'hahaCount': 93, + 'commentCount': 217, + 'shareCount': 364, + 'sadCount': 57, + 'angryCount': 131, + 'totalVideoTimeMS': 1638029, + 'postCount': 4 + } + }, + 'subscriberData': { + 'initialCount': 11131339, + 'finalCount': 11131099 + } + }, { + 'account': { + 'id': 10284, + 'name': 'The New Yorker', + 'handle': 'newyorker', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1174822_10151878824588869_2070117374_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQno9Opk1N_2uuxM9xMCbaLh-8w7vk3rWYzY5iX2B0axGmTGyU1kkZY1RTndOiqUuAE&_nc_ht=scontent.xx&oh=e4a5a2194344ddb52a1e83254332bea3&oe=5DC7CED7', + 'subscriberCount': 4287325, + 'url': 'https://www.facebook.com/9258148868', + 'platform': 'Facebook', + 'platformId': '9258148868', + 'verified': True + }, + 'summary': { + 'shareCount': 2613, + 'loveCount': 230, + 'totalInteractionCount': 9559, + 'wowCount': 653, + 'sadCount': 413, + 'angryCount': 921, + 'thankfulCount': 0, + 'postCount': 26, + 'interactionRate': 0.008560272893102834, + 'likeCount': 3747, + 'hahaCount': 322, + 'commentCount': 660 + }, + 'breakdown': { + 'link': { + 'shareCount': 2613, + 'loveCount': 230, + 'totalInteractionCount': 9559, + 'wowCount': 653, + 'sadCount': 413, + 'angryCount': 921, + 'thankfulCount': 0, + 'postCount': 26, + 'interactionRate': 0.008560272893102834, + 'likeCount': 3747, + 'hahaCount': 322, + 'commentCount': 660 + } + }, + 'subscriberData': { + 'initialCount': 4287168, + 'finalCount': 4287325 + } + }, { + 'account': { + 'id': 16406, + 'name': 'TIME', + 'handle': 'time', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10372522_10152195008896491_2022604163270194960_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkWdxZsjSYaIb0mLyEuq2eqxcvi6f98JCGl-qbaXtMNC9m3vw71t4X881vCIALzL7I&_nc_ht=scontent.xx&oh=01fa11e373104495d36fd77682a53514&oe=5E0D29D6', + 'subscriberCount': 12917303, + 'url': 'https://www.facebook.com/10606591490', + 'platform': 'Facebook', + 'platformId': '10606591490', + 'verified': True + }, + 'summary': { + 'loveCount': 627, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 9096, + 'wowCount': 211, + 'thankfulCount': 0, + 'interactionRate': 0.002601108714848309, + 'likeCount': 4794, + 'hahaCount': 202, + 'commentCount': 787, + 'shareCount': 1858, + 'sadCount': 576, + 'angryCount': 41, + 'totalVideoTimeMS': 846327, + 'postCount': 27 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 8, + 'totalInteractionCount': 375, + 'wowCount': 7, + 'thankfulCount': 0, + 'interactionRate': 0.0014476408621328387, + 'likeCount': 127, + 'hahaCount': 5, + 'commentCount': 38, + 'shareCount': 53, + 'sadCount': 137, + 'angryCount': 0, + 'totalVideoTimeMS': 208401, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 34, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 1204, + 'wowCount': 91, + 'thankfulCount': 0, + 'interactionRate': 0.001331519937362825, + 'likeCount': 
474, + 'hahaCount': 15, + 'commentCount': 163, + 'shareCount': 249, + 'sadCount': 168, + 'angryCount': 10, + 'totalVideoTimeMS': 846327, + 'postCount': 7 + }, + 'crosspost': { + 'loveCount': 26, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 829, + 'wowCount': 84, + 'thankfulCount': 0, + 'interactionRate': 0.0012773301724701518, + 'likeCount': 347, + 'hahaCount': 10, + 'commentCount': 125, + 'shareCount': 196, + 'sadCount': 31, + 'angryCount': 10, + 'totalVideoTimeMS': 637926, + 'postCount': 5 + }, + 'link': { + 'shareCount': 1609, + 'loveCount': 593, + 'totalInteractionCount': 7892, + 'wowCount': 120, + 'sadCount': 408, + 'angryCount': 31, + 'thankfulCount': 0, + 'postCount': 20, + 'interactionRate': 0.003050109623959029, + 'likeCount': 4320, + 'hahaCount': 187, + 'commentCount': 624 + } + }, + 'subscriberData': { + 'initialCount': 12917834, + 'finalCount': 12917303 + } + }, { + 'account': { + 'id': 765761, + 'name': 'The Blacksphere', + 'handle': 'theblacksphere.net', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18119422_10154519215947596_8821577492859645295_n.png?_nc_cat=105&_nc_oc=AQkg0Hf-P17_kRiBglxNNjSQbh-fwZuFfcRNPqUcPH7EQalrULeQ4iA16aZvUqiWsvg&_nc_ht=scontent.xx&oh=44f59681d9adc96cbc0688df3addc493&oe=5E13F29A', + 'subscriberCount': 577927, + 'url': 'https://www.facebook.com/49867377595', + 'platform': 'Facebook', + 'platformId': '49867377595', + 'verified': False + }, + 'summary': { + 'shareCount': 2420, + 'loveCount': 41, + 'totalInteractionCount': 8827, + 'wowCount': 243, + 'sadCount': 123, + 'angryCount': 1467, + 'thankfulCount': 0, + 'postCount': 19, + 'interactionRate': 0.08028452559025996, + 'likeCount': 1529, + 'hahaCount': 1682, + 'commentCount': 1322 + }, + 'breakdown': { + 'link': { + 'shareCount': 1780, + 'loveCount': 6, + 'totalInteractionCount': 6313, + 'wowCount': 240, + 'sadCount': 121, + 'angryCount': 1466, + 'thankfulCount': 0, + 'postCount': 18, + 'interactionRate': 0.06055944818230816, + 'likeCount': 535, + 'hahaCount': 878, + 'commentCount': 1287 + }, + 'photo': { + 'shareCount': 640, + 'loveCount': 35, + 'totalInteractionCount': 2514, + 'wowCount': 3, + 'sadCount': 2, + 'angryCount': 1, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.4349898649437792, + 'likeCount': 994, + 'hahaCount': 804, + 'commentCount': 35 + } + }, + 'subscriberData': { + 'initialCount': 577962, + 'finalCount': 577927 + } + }, { + 'account': { + 'id': 16340, + 'name': 'NewsBusters.org', + 'handle': 'newsbusters', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22894025_10156065135861178_6465017462954076657_n.png?_nc_cat=1&_nc_oc=AQkwnX53WKVe23PNYVstnqmppMCnDaKdcHy8l_Lt9nOPecFbnoIjtVzWaEHsJ9zjqHo&_nc_ht=scontent.xx&oh=6f2998202b70329d6ff64e16c4e48a8f&oe=5E13E3DA', + 'subscriberCount': 2919313, + 'url': 'https://www.facebook.com/6333396177', + 'platform': 'Facebook', + 'platformId': '6333396177', + 'verified': True + }, + 'summary': { + 'loveCount': 249, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 8748, + 'wowCount': 165, + 'thankfulCount': 0, + 'interactionRate': 0.029938181423324176, + 'likeCount': 3436, + 'hahaCount': 1751, + 'commentCount': 1661, + 'shareCount': 1165, + 'sadCount': 79, + 'angryCount': 242, + 'totalVideoTimeMS': 386345, + 'postCount': 10 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 6, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 815, + 'wowCount': 12, + 'thankfulCount': 0, + 'interactionRate': 0.027917182906189015, + 'likeCount': 170, + 'hahaCount': 276, + 
'commentCount': 186, + 'shareCount': 122, + 'sadCount': 6, + 'angryCount': 37, + 'totalVideoTimeMS': 386345, + 'postCount': 1 + }, + 'crosspost': { + 'loveCount': 6, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 815, + 'wowCount': 12, + 'thankfulCount': 0, + 'interactionRate': 0.027917182906189015, + 'likeCount': 170, + 'hahaCount': 276, + 'commentCount': 186, + 'shareCount': 122, + 'sadCount': 6, + 'angryCount': 37, + 'totalVideoTimeMS': 386345, + 'postCount': 1 + }, + 'link': { + 'shareCount': 1043, + 'loveCount': 243, + 'totalInteractionCount': 7933, + 'wowCount': 153, + 'sadCount': 73, + 'angryCount': 205, + 'thankfulCount': 0, + 'postCount': 9, + 'interactionRate': 0.030177960908408005, + 'likeCount': 3266, + 'hahaCount': 1475, + 'commentCount': 1475 + } + }, + 'subscriberData': { + 'initialCount': 2919385, + 'finalCount': 2919313 + } + }, { + 'account': { + 'id': 44528, + 'name': 'Vox', + 'handle': 'Vox', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15327441_612869972233942_727410529402189533_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnoAo-srh87mkvD-DKqEDzFi4nn14JVBUE8HPqgKgoKz2LtUzKnd7p6NTRpO6WA_Gg&_nc_ht=scontent.xx&oh=ffdab33a30a7adbfde40574c198f8580&oe=5DF8E26D', + 'subscriberCount': 2426279, + 'url': 'https://www.facebook.com/223649167822693', + 'platform': 'Facebook', + 'platformId': '223649167822693', + 'verified': True + }, + 'summary': { + 'loveCount': 319, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 8598, + 'wowCount': 288, + 'thankfulCount': 0, + 'interactionRate': 0.016857786844620047, + 'likeCount': 3431, + 'hahaCount': 163, + 'commentCount': 653, + 'shareCount': 1691, + 'sadCount': 329, + 'angryCount': 1724, + 'totalVideoTimeMS': 265115, + 'postCount': 21 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 47, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 761, + 'wowCount': 20, + 'thankfulCount': 0, + 'interactionRate': 0.01566249144487926, + 'likeCount': 443, + 'hahaCount': 5, + 'commentCount': 54, + 'shareCount': 172, + 'sadCount': 19, + 'angryCount': 1, + 'totalVideoTimeMS': 265115, + 'postCount': 2 + }, + 'crosspost': { + 'loveCount': 47, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 761, + 'wowCount': 20, + 'thankfulCount': 0, + 'interactionRate': 0.01566249144487926, + 'likeCount': 443, + 'hahaCount': 5, + 'commentCount': 54, + 'shareCount': 172, + 'sadCount': 19, + 'angryCount': 1, + 'totalVideoTimeMS': 265115, + 'postCount': 2 + }, + 'link': { + 'shareCount': 1519, + 'loveCount': 272, + 'totalInteractionCount': 7837, + 'wowCount': 268, + 'sadCount': 310, + 'angryCount': 1723, + 'thankfulCount': 0, + 'postCount': 19, + 'interactionRate': 0.016981438092869096, + 'likeCount': 2988, + 'hahaCount': 158, + 'commentCount': 599 + } + }, + 'subscriberData': { + 'initialCount': 2426078, + 'finalCount': 2426279 + } + }, { + 'account': { + 'id': 327932, + 'name': 'Media Research Center', + 'handle': 'mediaresearchcenter', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22814117_10155267787348717_9035099093135610710_n.png?_nc_cat=1&_nc_oc=AQnlBU3OCfeS-5QWg2v10Je2qqRgOr8VQS088-pc6gM4VZ_wGRCLBF_h5ObNobn7SOE&_nc_ht=scontent.xx&oh=4444f21775a6df49bc0c533f492d5953&oe=5E0B587B', + 'subscriberCount': 1853272, + 'url': 'https://www.facebook.com/10498053716', + 'platform': 'Facebook', + 'platformId': '10498053716', + 'verified': True + }, + 'summary': { + 'loveCount': 195, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 8478, + 'wowCount': 155, + 'thankfulCount': 0, + 
'interactionRate': 0.03809474475981425, + 'likeCount': 2225, + 'hahaCount': 1599, + 'commentCount': 1820, + 'shareCount': 1380, + 'sadCount': 405, + 'angryCount': 699, + 'totalVideoTimeMS': 386345, + 'postCount': 12 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 4, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 511, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.027572825173180004, + 'likeCount': 124, + 'hahaCount': 162, + 'commentCount': 98, + 'shareCount': 78, + 'sadCount': 5, + 'angryCount': 36, + 'totalVideoTimeMS': 386345, + 'postCount': 1 + }, + 'crosspost': { + 'loveCount': 4, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 511, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.027572825173180004, + 'likeCount': 124, + 'hahaCount': 162, + 'commentCount': 98, + 'shareCount': 78, + 'sadCount': 5, + 'angryCount': 36, + 'totalVideoTimeMS': 386345, + 'postCount': 1 + }, + 'link': { + 'shareCount': 1302, + 'loveCount': 191, + 'totalInteractionCount': 7967, + 'wowCount': 151, + 'sadCount': 400, + 'angryCount': 663, + 'thankfulCount': 0, + 'postCount': 11, + 'interactionRate': 0.03906599887550357, + 'likeCount': 2101, + 'hahaCount': 1437, + 'commentCount': 1722 + } + }, + 'subscriberData': { + 'initialCount': 1853276, + 'finalCount': 1853272 + } + }, { + 'account': { + 'id': 546413, + 'name': 'PJ Media', + 'handle': 'PJMedia', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/11233498_10153918103746159_4425260475851381266_n.jpg?_nc_cat=1&_nc_oc=AQlsQcaTBN0IYmuAz9KhN7jR3MPlfGRQ6pQx6vtSV9AWa6eNztotI3-NTLX1xGzJ6zE&_nc_ht=scontent.xx&oh=15f625aebc03c1c0e428efec7e19fab3&oe=5E04568A', + 'subscriberCount': 345163, + 'url': 'https://www.facebook.com/15418366158', + 'platform': 'Facebook', + 'platformId': '15418366158', + 'verified': True + }, + 'summary': { + 'shareCount': 2584, + 'loveCount': 46, + 'totalInteractionCount': 8327, + 'wowCount': 270, + 'sadCount': 158, + 'angryCount': 1748, + 'thankfulCount': 0, + 'postCount': 17, + 'interactionRate': 0.14167568436743544, + 'likeCount': 1001, + 'hahaCount': 842, + 'commentCount': 1678 + }, + 'breakdown': { + 'link': { + 'shareCount': 2584, + 'loveCount': 46, + 'totalInteractionCount': 8327, + 'wowCount': 270, + 'sadCount': 158, + 'angryCount': 1748, + 'thankfulCount': 0, + 'postCount': 17, + 'interactionRate': 0.14167568436743544, + 'likeCount': 1001, + 'hahaCount': 842, + 'commentCount': 1678 + } + }, + 'subscriberData': { + 'initialCount': 345146, + 'finalCount': 345163 + } + }, { + 'account': { + 'id': 1165716, + 'name': 'Don Lemon CNN', + 'handle': 'donlemoncnn', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18057083_1290293507691218_2466909210169106234_n.jpg?_nc_cat=1&_nc_oc=AQnVGMIs0e2R-6KqoSknArDh_vaQ99IJS6k-qf8dfQ8_11ygvd9iWteTgaDp-e325os&_nc_ht=scontent.xx&oh=041b801f1ae88fb46a42c942efe1775f&oe=5E089245', + 'subscriberCount': 638421, + 'url': 'https://www.facebook.com/1009804299073475', + 'platform': 'Facebook', + 'platformId': '1009804299073475', + 'verified': True + }, + 'summary': { + 'loveCount': 606, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 7887, + 'wowCount': 170, + 'thankfulCount': 0, + 'interactionRate': 0.24702226176527717, + 'likeCount': 3047, + 'hahaCount': 345, + 'commentCount': 1274, + 'shareCount': 1548, + 'sadCount': 412, + 'angryCount': 485, + 'totalVideoTimeMS': 1615012, + 'postCount': 5 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 606, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 7887, + 
'wowCount': 170, + 'thankfulCount': 0, + 'interactionRate': 0.24702226176527717, + 'likeCount': 3047, + 'hahaCount': 345, + 'commentCount': 1274, + 'shareCount': 1548, + 'sadCount': 412, + 'angryCount': 485, + 'totalVideoTimeMS': 1615012, + 'postCount': 5 + }, + 'crosspost': { + 'loveCount': 606, + 'threePlusMinuteVideoCount': 5, + 'totalInteractionCount': 7887, + 'wowCount': 170, + 'thankfulCount': 0, + 'interactionRate': 0.24702226176527717, + 'likeCount': 3047, + 'hahaCount': 345, + 'commentCount': 1274, + 'shareCount': 1548, + 'sadCount': 412, + 'angryCount': 485, + 'totalVideoTimeMS': 1615012, + 'postCount': 5 + } + }, + 'subscriberData': { + 'initialCount': 638387, + 'finalCount': 638421 + } + }, { + 'account': { + 'id': 3998, + 'name': 'The Daily Show', + 'handle': 'thedailyshow', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/48368386_10157135026436800_3277713629201301504_n.jpg?_nc_cat=1&_nc_oc=AQnBJgtgBUq5JxUf7vm9iBepQKcPv9l_LwUxs4ocWc-OZKl6FlGJg8q-djlalcukEM8&_nc_ht=scontent.xx&oh=b8d75b4499115eea65b8a9e51bec6c43&oe=5DFF9198', + 'subscriberCount': 8586527, + 'url': 'https://www.facebook.com/7976226799', + 'platform': 'Facebook', + 'platformId': '7976226799', + 'verified': True + }, + 'summary': { + 'loveCount': 335, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 7864, + 'wowCount': 117, + 'thankfulCount': 0, + 'interactionRate': 0.015257449959494964, + 'likeCount': 3817, + 'hahaCount': 2383, + 'commentCount': 398, + 'shareCount': 720, + 'sadCount': 75, + 'angryCount': 19, + 'totalVideoTimeMS': 842933, + 'postCount': 6 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 142, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 3132, + 'wowCount': 46, + 'thankfulCount': 0, + 'interactionRate': 0.012159372334131864, + 'likeCount': 1621, + 'hahaCount': 879, + 'commentCount': 184, + 'shareCount': 240, + 'sadCount': 15, + 'angryCount': 5, + 'totalVideoTimeMS': 357647, + 'postCount': 3 + }, + 'owned_video': { + 'loveCount': 327, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 5907, + 'wowCount': 68, + 'thankfulCount': 0, + 'interactionRate': 0.013754998780277522, + 'likeCount': 3013, + 'hahaCount': 1567, + 'commentCount': 279, + 'shareCount': 577, + 'sadCount': 59, + 'angryCount': 17, + 'totalVideoTimeMS': 842933, + 'postCount': 5 + }, + 'crosspost': { + 'loveCount': 185, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 2775, + 'wowCount': 22, + 'thankfulCount': 0, + 'interactionRate': 0.01615426190367902, + 'likeCount': 1392, + 'hahaCount': 688, + 'commentCount': 95, + 'shareCount': 337, + 'sadCount': 44, + 'angryCount': 12, + 'totalVideoTimeMS': 485286, + 'postCount': 2 + }, + 'photo': { + 'shareCount': 143, + 'loveCount': 8, + 'totalInteractionCount': 1957, + 'wowCount': 49, + 'sadCount': 16, + 'angryCount': 2, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.022792999672314233, + 'likeCount': 804, + 'hahaCount': 816, + 'commentCount': 119 + } + }, + 'subscriberData': { + 'initialCount': 8585412, + 'finalCount': 8586527 + } + }, { + 'account': { + 'id': 28285, + 'name': 'Townhall.com', + 'handle': 'townhallcom', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/52313832_10158468618829657_401031722176610304_n.png?_nc_cat=1&_nc_oc=AQnJFJyepGbGHljkkVb93rI6SAbiEecWjNxe3C4rS2WFLS18RWTwjScIUwrrUrlWx9o&_nc_ht=scontent.xx&oh=ff685a6018b55bd19ae00229c9d617dc&oe=5DFD7EC4', + 'subscriberCount': 1286652, + 'url': 'https://www.facebook.com/41632789656', + 'platform': 'Facebook', + 'platformId': 
'41632789656', + 'verified': True + }, + 'summary': { + 'shareCount': 1371, + 'loveCount': 144, + 'totalInteractionCount': 7605, + 'wowCount': 153, + 'sadCount': 54, + 'angryCount': 1036, + 'thankfulCount': 0, + 'postCount': 17, + 'interactionRate': 0.03474032963676537, + 'likeCount': 2585, + 'hahaCount': 792, + 'commentCount': 1470 + }, + 'breakdown': { + 'link': { + 'shareCount': 1371, + 'loveCount': 144, + 'totalInteractionCount': 7605, + 'wowCount': 153, + 'sadCount': 54, + 'angryCount': 1036, + 'thankfulCount': 0, + 'postCount': 17, + 'interactionRate': 0.03474032963676537, + 'likeCount': 2585, + 'hahaCount': 792, + 'commentCount': 1470 + } + }, + 'subscriberData': { + 'initialCount': 1286726, + 'finalCount': 1286652 + } + }, { + 'account': { + 'id': 13493, + 'name': 'The Washington Times', + 'handle': 'TheWashingtonTimes', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/68701924_10158091236369411_4030630617484886016_n.png?_nc_cat=1&_nc_oc=AQn1GXVtNdWnXaGIEHrhLGQyOlmFDwbRgfVrqynMP9L2fBaUV5SpEChCSozGqc-wcGU&_nc_ht=scontent.xx&oh=6c68fde34ca8ee23b09a81bac28e0e22&oe=5DFDF501', + 'subscriberCount': 652390, + 'url': 'https://www.facebook.com/35994014410', + 'platform': 'Facebook', + 'platformId': '35994014410', + 'verified': True + }, + 'summary': { + 'shareCount': 1562, + 'loveCount': 344, + 'totalInteractionCount': 7423, + 'wowCount': 118, + 'sadCount': 74, + 'angryCount': 810, + 'thankfulCount': 0, + 'postCount': 22, + 'interactionRate': 0.05165621790646699, + 'likeCount': 2254, + 'hahaCount': 445, + 'commentCount': 1816 + }, + 'breakdown': { + 'link': { + 'shareCount': 1562, + 'loveCount': 344, + 'totalInteractionCount': 7423, + 'wowCount': 118, + 'sadCount': 74, + 'angryCount': 810, + 'thankfulCount': 0, + 'postCount': 22, + 'interactionRate': 0.05165621790646699, + 'likeCount': 2254, + 'hahaCount': 445, + 'commentCount': 1816 + } + }, + 'subscriberData': { + 'initialCount': 652390, + 'finalCount': 652390 + } + }, { + 'account': { + 'id': 816605, + 'name': 'WND', + 'handle': 'WNDNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/10616184_978685205477070_7301123703638589430_n.jpg?_nc_cat=110&_nc_oc=AQm5V5YpP7PucYw6lh5UcBTbvWDxAw3jNZpGGnOpem7RUhl7KQuT_0RFS9UItcAmqL8&_nc_ht=scontent.xx&oh=42799b825016837895356c7b53b45526&oe=5E0F6F64', + 'subscriberCount': 847147, + 'url': 'https://www.facebook.com/119984188013847', + 'platform': 'Facebook', + 'platformId': '119984188013847', + 'verified': False + }, + 'summary': { + 'shareCount': 1442, + 'loveCount': 160, + 'totalInteractionCount': 7394, + 'wowCount': 75, + 'sadCount': 65, + 'angryCount': 866, + 'thankfulCount': 0, + 'postCount': 4, + 'interactionRate': 0.21813919476182633, + 'likeCount': 2703, + 'hahaCount': 500, + 'commentCount': 1583 + }, + 'breakdown': { + 'link': { + 'shareCount': 1442, + 'loveCount': 160, + 'totalInteractionCount': 7394, + 'wowCount': 75, + 'sadCount': 65, + 'angryCount': 866, + 'thankfulCount': 0, + 'postCount': 4, + 'interactionRate': 0.21813919476182633, + 'likeCount': 2703, + 'hahaCount': 500, + 'commentCount': 1583 + } + }, + 'subscriberData': { + 'initialCount': 847184, + 'finalCount': 847147 + } + }, { + 'account': { + 'id': 19547, + 'name': 'CNBC', + 'handle': 'cnbc', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58378702_10157689832569369_7768288312980144128_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkQWczZuEUuUs3yQw-GF-LxMEC4qqezMSlw7v7S2I6ANZbjmX4F6ZJhxCxnIUe_qJY&_nc_ht=scontent.xx&oh=dd93c8b01aefbe85d169013c4e19c5a5&oe=5E13A6DF', + 'subscriberCount': 
3112193, + 'url': 'https://www.facebook.com/97212224368', + 'platform': 'Facebook', + 'platformId': '97212224368', + 'verified': True + }, + 'summary': { + 'loveCount': 219, + 'totalInteractionCount': 7091, + 'wowCount': 328, + 'thankfulCount': 0, + 'interactionRate': 0.00629798801775647, + 'likeCount': 3504, + 'hahaCount': 113, + 'commentCount': 535, + 'shareCount': 2351, + 'sadCount': 22, + 'angryCount': 19, + 'totalVideoTimeMS': 36841, + 'postCount': 36 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 0, + 'totalInteractionCount': 26, + 'wowCount': 0, + 'thankfulCount': 0, + 'interactionRate': 0.0008354473901105522, + 'likeCount': 17, + 'hahaCount': 1, + 'commentCount': 3, + 'shareCount': 5, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 36841, + 'postCount': 1 + }, + 'owned_video': { + 'loveCount': 0, + 'totalInteractionCount': 26, + 'wowCount': 0, + 'thankfulCount': 0, + 'interactionRate': 0.0008354473901105522, + 'likeCount': 17, + 'hahaCount': 1, + 'commentCount': 3, + 'shareCount': 5, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 36841, + 'postCount': 1 + }, + 'link': { + 'shareCount': 2346, + 'loveCount': 219, + 'totalInteractionCount': 7065, + 'wowCount': 328, + 'sadCount': 22, + 'angryCount': 19, + 'thankfulCount': 0, + 'postCount': 35, + 'interactionRate': 0.006458650977393115, + 'likeCount': 3487, + 'hahaCount': 112, + 'commentCount': 532 + } + }, + 'subscriberData': { + 'initialCount': 3112017, + 'finalCount': 3112193 + } + }, { + 'account': { + 'id': 4003, + 'name': 'Mother Jones', + 'handle': 'motherjones', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13173930_10153754114402144_7216599223824347020_n.jpg?_nc_cat=1&_nc_oc=AQl6Am3t1g_ne4B7X5hq_O0i1VTjnEJF4vjbAn7XZtJx-l1BO3gpV20BzW2uwlbRLnA&_nc_ht=scontent.xx&oh=365d8d33c49b25f19609c36f1a951716&oe=5E10472B', + 'subscriberCount': 1542486, + 'url': 'https://www.facebook.com/7642602143', + 'platform': 'Facebook', + 'platformId': '7642602143', + 'verified': True + }, + 'summary': { + 'loveCount': 114, + 'totalInteractionCount': 7080, + 'wowCount': 391, + 'thankfulCount': 0, + 'interactionRate': 0.057374466611950226, + 'likeCount': 2534, + 'hahaCount': 94, + 'commentCount': 495, + 'shareCount': 1470, + 'sadCount': 299, + 'angryCount': 1683, + 'totalVideoTimeMS': 154259, + 'postCount': 8 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 38, + 'totalInteractionCount': 313, + 'wowCount': 1, + 'thankfulCount': 0, + 'interactionRate': 0.020291760507955277, + 'likeCount': 206, + 'hahaCount': 2, + 'commentCount': 15, + 'shareCount': 51, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 154259, + 'postCount': 1 + }, + 'owned_video': { + 'loveCount': 38, + 'totalInteractionCount': 313, + 'wowCount': 1, + 'thankfulCount': 0, + 'interactionRate': 0.020291760507955277, + 'likeCount': 206, + 'hahaCount': 2, + 'commentCount': 15, + 'shareCount': 51, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 154259, + 'postCount': 1 + }, + 'link': { + 'shareCount': 1419, + 'loveCount': 76, + 'totalInteractionCount': 6767, + 'wowCount': 390, + 'sadCount': 299, + 'angryCount': 1683, + 'thankfulCount': 0, + 'postCount': 7, + 'interactionRate': 0.06262568897982364, + 'likeCount': 2328, + 'hahaCount': 92, + 'commentCount': 480 + } + }, + 'subscriberData': { + 'initialCount': 1542510, + 'finalCount': 1542486 + } + }, { + 'account': { + 'id': 4004, + 'name': "The Last Word With Lawrence O'Donnell", + 'handle': 'thelastword', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16114622_1184240434964134_5160717321521180833_n.png?_nc_cat=1&_nc_oc=AQkE59Us5gvqt0N90qJZW6XSCRHGK5YGgwcB-G1YctCjO7mmMEWXfrnnaX-jZYV633o&_nc_ht=scontent.xx&oh=eaa0c18d2823fe813960f06f60585643&oe=5E08F8C6', + 'subscriberCount': 515865, + 'url': 'https://www.facebook.com/114945745226947', + 'platform': 'Facebook', + 'platformId': '114945745226947', + 'verified': True + }, + 'summary': { + 'loveCount': 292, + 'totalInteractionCount': 6972, + 'wowCount': 346, + 'thankfulCount': 0, + 'interactionRate': 0.13511286867688252, + 'likeCount': 1822, + 'hahaCount': 111, + 'commentCount': 534, + 'shareCount': 1632, + 'sadCount': 233, + 'angryCount': 2002, + 'totalVideoTimeMS': 164000, + 'postCount': 10 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 7, + 'totalInteractionCount': 1348, + 'wowCount': 111, + 'thankfulCount': 0, + 'interactionRate': 0.26130867571942273, + 'likeCount': 268, + 'hahaCount': 8, + 'commentCount': 136, + 'shareCount': 355, + 'sadCount': 19, + 'angryCount': 444, + 'totalVideoTimeMS': 164000, + 'postCount': 1 + }, + 'owned_video': { + 'loveCount': 7, + 'totalInteractionCount': 1348, + 'wowCount': 111, + 'thankfulCount': 0, + 'interactionRate': 0.26130867571942273, + 'likeCount': 268, + 'hahaCount': 8, + 'commentCount': 136, + 'shareCount': 355, + 'sadCount': 19, + 'angryCount': 444, + 'totalVideoTimeMS': 164000, + 'postCount': 1 + }, + 'link': { + 'shareCount': 1277, + 'loveCount': 285, + 'totalInteractionCount': 5624, + 'wowCount': 235, + 'sadCount': 214, + 'angryCount': 1558, + 'thankfulCount': 0, + 'postCount': 9, + 'interactionRate': 0.12096187956151319, + 'likeCount': 1554, + 'hahaCount': 103, + 'commentCount': 398 + } + }, + 'subscriberData': { + 'initialCount': 515865, + 'finalCount': 515865 + } + }, { + 'account': { + 'id': 70882, + 'name': "Beto O'Rourke", + 'handle': 'betoorourke', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26047358_1510626285653743_4735771441297721924_n.jpg?_nc_cat=1&_nc_oc=AQmnnmWuN4ETlzHyBrSSQHdk1_CRKiq7bJlyFjoZ-RIKGey-i9IGXLUGIDVsarFy_m0&_nc_ht=scontent.xx&oh=8289827910a7251c17e5ce8b047c6fd9&oe=5E075DA7', + 'subscriberCount': 917049, + 'url': 'https://www.facebook.com/223055747744143', + 'platform': 'Facebook', + 'platformId': '223055747744143', + 'verified': True + }, + 'summary': { + 'loveCount': 1449, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 6715, + 'wowCount': 78, + 'thankfulCount': 0, + 'interactionRate': 0.1829766306783548, + 'likeCount': 3751, + 'hahaCount': 54, + 'commentCount': 711, + 'shareCount': 655, + 'sadCount': 7, + 'angryCount': 10, + 'totalVideoTimeMS': 434812, + 'postCount': 4 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 1124, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 4794, + 'wowCount': 56, + 'thankfulCount': 0, + 'interactionRate': 0.17425307260072165, + 'likeCount': 2396, + 'hahaCount': 43, + 'commentCount': 617, + 'shareCount': 551, + 'sadCount': 2, + 'angryCount': 5, + 'totalVideoTimeMS': 434812, + 'postCount': 3 + }, + 'owned_video': { + 'loveCount': 1124, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 4794, + 'wowCount': 56, + 'thankfulCount': 0, + 'interactionRate': 0.17425307260072165, + 'likeCount': 2396, + 'hahaCount': 43, + 'commentCount': 617, + 'shareCount': 551, + 'sadCount': 2, + 'angryCount': 5, + 'totalVideoTimeMS': 434812, + 'postCount': 3 + }, + 'photo': { + 'shareCount': 104, + 'loveCount': 325, + 'totalInteractionCount': 1921, + 'wowCount': 22, + 'sadCount': 5, + 
'angryCount': 5, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.20947443833916538, + 'likeCount': 1355, + 'hahaCount': 11, + 'commentCount': 94 + } + }, + 'subscriberData': { + 'initialCount': 917065, + 'finalCount': 917049 + } + }, { + 'account': { + 'id': 802304, + 'name': 'LifeSiteNews.com', + 'handle': 'LifeSiteNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/14045774_10154523214678203_3249696568170902881_n.png?_nc_cat=109&_nc_oc=AQkYiOUyuVFI9ABzfNjSP2Yx1Fi0sRpuYE0U7JBYB2UtkJwykb2_lWjO7RuW_QHhoN8&_nc_ht=scontent.xx&oh=f0d83796cec1106eb43ee96d151e21dd&oe=5E12B767', + 'subscriberCount': 199458, + 'url': 'https://www.facebook.com/112623813202', + 'platform': 'Facebook', + 'platformId': '112623813202', + 'verified': False + }, + 'summary': { + 'shareCount': 2888, + 'loveCount': 58, + 'totalInteractionCount': 6534, + 'wowCount': 138, + 'sadCount': 620, + 'angryCount': 1661, + 'thankfulCount': 0, + 'postCount': 11, + 'interactionRate': 0.2978242285525191, + 'likeCount': 730, + 'hahaCount': 26, + 'commentCount': 413 + }, + 'breakdown': { + 'link': { + 'shareCount': 2888, + 'loveCount': 58, + 'totalInteractionCount': 6534, + 'wowCount': 138, + 'sadCount': 620, + 'angryCount': 1661, + 'thankfulCount': 0, + 'postCount': 11, + 'interactionRate': 0.2978242285525191, + 'likeCount': 730, + 'hahaCount': 26, + 'commentCount': 413 + } + }, + 'subscriberData': { + 'initialCount': 199435, + 'finalCount': 199458 + } + }, { + 'account': { + 'id': 1434569, + 'name': 'Stand Up America', + 'handle': 'StandUpAmerica', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46836898_641805796289575_5665799734510485504_n.png?_nc_cat=104&_nc_oc=AQnhfgvoFjvWj9dXVn0PbdHHi5khWt7WAJw6v0sooYifJY9u5dJtlNL7DqCIjlReKss&_nc_ht=scontent.xx&oh=4aba644629bd7f2680e9f2fb7ffbd864&oe=5E018897', + 'subscriberCount': 1217170, + 'url': 'https://www.facebook.com/169597416843751', + 'platform': 'Facebook', + 'platformId': '169597416843751', + 'verified': True + }, + 'summary': { + 'shareCount': 1745, + 'loveCount': 4, + 'totalInteractionCount': 6327, + 'wowCount': 191, + 'sadCount': 99, + 'angryCount': 3060, + 'thankfulCount': 0, + 'postCount': 3, + 'interactionRate': 0.1732679368052646, + 'likeCount': 599, + 'hahaCount': 14, + 'commentCount': 615 + }, + 'breakdown': { + 'link': { + 'shareCount': 1745, + 'loveCount': 4, + 'totalInteractionCount': 6327, + 'wowCount': 191, + 'sadCount': 99, + 'angryCount': 3060, + 'thankfulCount': 0, + 'postCount': 3, + 'interactionRate': 0.1732679368052646, + 'likeCount': 599, + 'hahaCount': 14, + 'commentCount': 615 + } + }, + 'subscriberData': { + 'initialCount': 1217210, + 'finalCount': 1217170 + } + }, { + 'account': { + 'id': 6631, + 'name': 'Slate.com', + 'handle': 'Slate', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26815412_10155867835401438_6786592847511925697_n.jpg?_nc_cat=1&_nc_oc=AQnlPqxpF8HJHZLBBP9M3JCvr7KRojNU13Gek2aIDlLStNh3FwBSADznEiZCEG1_doE&_nc_ht=scontent.xx&oh=fa5bf2320fbcba9484de00ac7f908e6c&oe=5DC8F5CA', + 'subscriberCount': 1518896, + 'url': 'https://www.facebook.com/21516776437', + 'platform': 'Facebook', + 'platformId': '21516776437', + 'verified': True + }, + 'summary': { + 'shareCount': 1119, + 'loveCount': 44, + 'totalInteractionCount': 6254, + 'wowCount': 231, + 'sadCount': 188, + 'angryCount': 2143, + 'thankfulCount': 0, + 'postCount': 27, + 'interactionRate': 0.01520832441791949, + 'likeCount': 1175, + 'hahaCount': 410, + 'commentCount': 944 + }, + 'breakdown': { + 'link': { + 'shareCount': 1119, 
+ 'loveCount': 44, + 'totalInteractionCount': 6254, + 'wowCount': 231, + 'sadCount': 188, + 'angryCount': 2143, + 'thankfulCount': 0, + 'postCount': 27, + 'interactionRate': 0.01520832441791949, + 'likeCount': 1175, + 'hahaCount': 410, + 'commentCount': 944 + } + }, + 'subscriberData': { + 'initialCount': 1518914, + 'finalCount': 1518896 + } + }, { + 'account': { + 'id': 10334, + 'name': 'Forbes', + 'handle': 'forbes', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/50813627_10157101876612509_8264332807757627392_n.jpg?_nc_cat=1&_nc_oc=AQnAYl9Z16OLHbDWE7a-lxG8OIKUVGaLmLLun6kzNd5lYoeiHsDE7pDBktqBmspzHnQ&_nc_ht=scontent.xx&oh=90b9d2de38f9b99c2b632764adb156c1&oe=5E020D31', + 'subscriberCount': 5732199, + 'url': 'https://www.facebook.com/30911162508', + 'platform': 'Facebook', + 'platformId': '30911162508', + 'verified': True + }, + 'summary': { + 'loveCount': 265, + 'totalInteractionCount': 6197, + 'wowCount': 185, + 'thankfulCount': 0, + 'interactionRate': 0.002076058587071162, + 'likeCount': 3423, + 'hahaCount': 248, + 'commentCount': 691, + 'shareCount': 1240, + 'sadCount': 57, + 'angryCount': 88, + 'totalVideoTimeMS': 179004, + 'postCount': 52 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 4, + 'totalInteractionCount': 41, + 'wowCount': 1, + 'thankfulCount': 0, + 'interactionRate': 0.00034891740959179193, + 'likeCount': 22, + 'hahaCount': 0, + 'commentCount': 3, + 'shareCount': 11, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 179004, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 4, + 'totalInteractionCount': 41, + 'wowCount': 1, + 'thankfulCount': 0, + 'interactionRate': 0.00034891740959179193, + 'likeCount': 22, + 'hahaCount': 0, + 'commentCount': 3, + 'shareCount': 11, + 'sadCount': 0, + 'angryCount': 0, + 'totalVideoTimeMS': 179004, + 'postCount': 2 + }, + 'link': { + 'shareCount': 1059, + 'loveCount': 206, + 'totalInteractionCount': 5269, + 'wowCount': 179, + 'sadCount': 56, + 'angryCount': 88, + 'thankfulCount': 0, + 'postCount': 48, + 'interactionRate': 0.001901599882275266, + 'likeCount': 2759, + 'hahaCount': 247, + 'commentCount': 675 + }, + 'photo': { + 'shareCount': 170, + 'loveCount': 55, + 'totalInteractionCount': 887, + 'wowCount': 5, + 'sadCount': 1, + 'angryCount': 0, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.007728520622458192, + 'likeCount': 642, + 'hahaCount': 1, + 'commentCount': 13 + } + }, + 'subscriberData': { + 'initialCount': 5731832, + 'finalCount': 5732199 + } + }, { + 'account': { + 'id': 10335, + 'name': 'The Wall Street Journal', + 'handle': 'wsj', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26734229_10157192613173128_6286097899182572387_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkg3dR3V2rO72fdcQNc6Kdupv3fYH3-VXio9SvAwKULEi36QT0vhIKN0_FvohpQCGs&_nc_ht=scontent.xx&oh=f550584e1e7adab86d889e32b7468801&oe=5DFE7FE9', + 'subscriberCount': 6360356, + 'url': 'https://www.facebook.com/8304333127', + 'platform': 'Facebook', + 'platformId': '8304333127', + 'verified': True + }, + 'summary': { + 'shareCount': 636, + 'loveCount': 159, + 'totalInteractionCount': 6154, + 'wowCount': 194, + 'sadCount': 373, + 'angryCount': 61, + 'thankfulCount': 0, + 'postCount': 39, + 'interactionRate': 0.002468462250215597, + 'likeCount': 2807, + 'hahaCount': 621, + 'commentCount': 1303 + }, + 'breakdown': { + 'link': { + 'shareCount': 636, + 'loveCount': 159, + 'totalInteractionCount': 6154, + 'wowCount': 194, + 'sadCount': 373, + 'angryCount': 61, + 'thankfulCount': 0, + 'postCount': 39, + 
'interactionRate': 0.002468462250215597, + 'likeCount': 2807, + 'hahaCount': 621, + 'commentCount': 1303 + } + }, + 'subscriberData': { + 'initialCount': 6360114, + 'finalCount': 6360356 + } + }, { + 'account': { + 'id': 18756, + 'name': 'BuzzFeed News', + 'handle': 'BuzzFeedNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/37324661_1987747984579543_6544772647132069888_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQl4xuZMtXJ6qFqyRhwKzfdvsAYA1JGI1ajz4X8q4bIHiObnrMGyXWEFiDcVxaVlrgM&_nc_ht=scontent.xx&oh=3a3c2ae104e50e8860b8dcf413215500&oe=5DFB7022', + 'subscriberCount': 3017031, + 'url': 'https://www.facebook.com/618786471475708', + 'platform': 'Facebook', + 'platformId': '618786471475708', + 'verified': True + }, + 'summary': { + 'loveCount': 495, + 'totalInteractionCount': 6041, + 'wowCount': 145, + 'thankfulCount': 0, + 'interactionRate': 0.011106491694664973, + 'likeCount': 2448, + 'hahaCount': 567, + 'commentCount': 881, + 'shareCount': 880, + 'sadCount': 156, + 'angryCount': 469, + 'totalVideoTimeMS': 230968, + 'postCount': 18 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 310, + 'totalInteractionCount': 3054, + 'wowCount': 45, + 'thankfulCount': 0, + 'interactionRate': 0.03375047326916102, + 'likeCount': 1169, + 'hahaCount': 437, + 'commentCount': 587, + 'shareCount': 503, + 'sadCount': 3, + 'angryCount': 0, + 'totalVideoTimeMS': 164111, + 'postCount': 3 + }, + 'owned_video': { + 'loveCount': 310, + 'totalInteractionCount': 3118, + 'wowCount': 46, + 'thankfulCount': 0, + 'interactionRate': 0.025826737403414964, + 'likeCount': 1185, + 'hahaCount': 438, + 'commentCount': 591, + 'shareCount': 517, + 'sadCount': 27, + 'angryCount': 4, + 'totalVideoTimeMS': 230968, + 'postCount': 4 + }, + 'crosspost': { + 'loveCount': 0, + 'totalInteractionCount': 64, + 'wowCount': 1, + 'thankfulCount': 0, + 'interactionRate': 0.0021218372192792784, + 'likeCount': 16, + 'hahaCount': 1, + 'commentCount': 4, + 'shareCount': 14, + 'sadCount': 24, + 'angryCount': 4, + 'totalVideoTimeMS': 66857, + 'postCount': 1 + }, + 'link': { + 'shareCount': 363, + 'loveCount': 185, + 'totalInteractionCount': 2923, + 'wowCount': 99, + 'sadCount': 129, + 'angryCount': 465, + 'thankfulCount': 0, + 'postCount': 14, + 'interactionRate': 0.006895970962657654, + 'likeCount': 1263, + 'hahaCount': 129, + 'commentCount': 290 + } + }, + 'subscriberData': { + 'initialCount': 3015477, + 'finalCount': 3017031 + } + }, { + 'account': { + 'id': 7781, + 'name': 'The Daily Beast', + 'handle': 'thedailybeast', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18447180_10155420999849203_1942956350622474660_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlsvWaYHxyRC2B3NwwmVoV1kpqGNvYkkSxSr_lFopmdwhj-uerxTWu7CmbWz-8Qq-Q&_nc_ht=scontent.xx&oh=86caf840e49b739e6381c591317aab4b&oe=5DC85150', + 'subscriberCount': 2163118, + 'url': 'https://www.facebook.com/37763684202', + 'platform': 'Facebook', + 'platformId': '37763684202', + 'verified': True + }, + 'summary': { + 'shareCount': 1041, + 'loveCount': 103, + 'totalInteractionCount': 6002, + 'wowCount': 358, + 'sadCount': 205, + 'angryCount': 1163, + 'thankfulCount': 0, + 'postCount': 22, + 'interactionRate': 0.012574188288761612, + 'likeCount': 1906, + 'hahaCount': 338, + 'commentCount': 888 + }, + 'breakdown': { + 'link': { + 'shareCount': 1041, + 'loveCount': 103, + 'totalInteractionCount': 6002, + 'wowCount': 358, + 'sadCount': 205, + 'angryCount': 1163, + 'thankfulCount': 0, + 'postCount': 22, + 'interactionRate': 0.012574188288761612, + 'likeCount': 1906, + 'hahaCount': 
338, + 'commentCount': 888 + } + }, + 'subscriberData': { + 'initialCount': 2163205, + 'finalCount': 2163118 + } + }, { + 'account': { + 'id': 38713, + 'name': 'C-SPAN', + 'handle': 'CSPAN', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/55510789_10157680504275579_8831454901000208384_n.jpg?_nc_cat=1&_nc_oc=AQmjwnGr1J3r3CGT1XgTUKOfeg_fD4sEbdS9k1LsQLmt6p7MuMZttOOafaHCnbTbRLM&_nc_ht=scontent.xx&oh=6cbbd30189f614a83fd0cd8c71f48f2a&oe=5DF5A55A', + 'subscriberCount': 1225429, + 'url': 'https://www.facebook.com/21472760578', + 'platform': 'Facebook', + 'platformId': '21472760578', + 'verified': True + }, + 'summary': { + 'shareCount': 128, + 'loveCount': 59, + 'totalInteractionCount': 5649, + 'wowCount': 10, + 'sadCount': 2, + 'angryCount': 3, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.4609752126879572, + 'likeCount': 456, + 'hahaCount': 104, + 'commentCount': 4887 + }, + 'breakdown': { + 'status': { + 'shareCount': 128, + 'loveCount': 59, + 'totalInteractionCount': 5649, + 'wowCount': 10, + 'sadCount': 2, + 'angryCount': 3, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.4609752126879572, + 'likeCount': 456, + 'hahaCount': 104, + 'commentCount': 4887 + } + }, + 'subscriberData': { + 'initialCount': 1225462, + 'finalCount': 1225429 + } + }, { + 'account': { + 'id': 40861, + 'name': 'AM Joy on MSNBC', + 'handle': 'amjoyshow', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16649094_1410994648972224_5741405830257531775_n.jpg?_nc_cat=1&_nc_oc=AQmHBK8cydrfov5x4YKdJPorNZH_m9SRBtjQ0frg7JykmFvhVgN67G0HMg73UoT_mto&_nc_ht=scontent.xx&oh=5eb86ad6a838433c9a88a570da2d9885&oe=5E0A5DB1', + 'subscriberCount': 337795, + 'url': 'https://www.facebook.com/598356233569407', + 'platform': 'Facebook', + 'platformId': '598356233569407', + 'verified': True + }, + 'summary': { + 'loveCount': 548, + 'totalInteractionCount': 5198, + 'wowCount': 93, + 'thankfulCount': 0, + 'interactionRate': 0.15364985005077253, + 'likeCount': 1681, + 'hahaCount': 28, + 'commentCount': 1488, + 'shareCount': 536, + 'sadCount': 46, + 'angryCount': 778, + 'totalVideoTimeMS': 55935, + 'postCount': 10 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 430, + 'totalInteractionCount': 2407, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.35614791832577913, + 'likeCount': 650, + 'hahaCount': 19, + 'commentCount': 1121, + 'shareCount': 179, + 'sadCount': 1, + 'angryCount': 3, + 'totalVideoTimeMS': 55935, + 'postCount': 2 + }, + 'owned_video': { + 'loveCount': 430, + 'totalInteractionCount': 2407, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.35614791832577913, + 'likeCount': 650, + 'hahaCount': 19, + 'commentCount': 1121, + 'shareCount': 179, + 'sadCount': 1, + 'angryCount': 3, + 'totalVideoTimeMS': 55935, + 'postCount': 2 + }, + 'link': { + 'shareCount': 342, + 'loveCount': 16, + 'totalInteractionCount': 2003, + 'wowCount': 89, + 'sadCount': 45, + 'angryCount': 775, + 'thankfulCount': 0, + 'postCount': 6, + 'interactionRate': 0.09858458587072688, + 'likeCount': 414, + 'hahaCount': 8, + 'commentCount': 314 + }, + 'photo': { + 'shareCount': 15, + 'loveCount': 102, + 'totalInteractionCount': 788, + 'wowCount': 0, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.1166436241233225, + 'likeCount': 617, + 'hahaCount': 1, + 'commentCount': 53 + } + }, + 'subscriberData': { + 'initialCount': 337767, + 'finalCount': 337795 + } + }, { + 'account': { + 'id': 8830, + 'name': 'Team Coco', + 'handle': 
'teamcoco', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27545533_1749701865088688_7429648788456974231_n.png?_nc_cat=1&_nc_oc=AQmIkkiMMABDhk0_zoAtgXNkZTDYcJ8uW8Ig76pvppQhEs1OyF4zSAtAZzFS14OC94g&_nc_ht=scontent.xx&oh=f04325a386158ffaa5df7dafbfea7d7b&oe=5E014FF4', + 'subscriberCount': 3980440, + 'url': 'https://www.facebook.com/108905269168364', + 'platform': 'Facebook', + 'platformId': '108905269168364', + 'verified': True + }, + 'summary': { + 'loveCount': 239, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 5162, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.04321130327300499, + 'likeCount': 2364, + 'hahaCount': 1573, + 'commentCount': 200, + 'shareCount': 778, + 'sadCount': 4, + 'angryCount': 0, + 'totalVideoTimeMS': 784551, + 'postCount': 3 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 221, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 4937, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.06200319562661414, + 'likeCount': 2188, + 'hahaCount': 1563, + 'commentCount': 186, + 'shareCount': 771, + 'sadCount': 4, + 'angryCount': 0, + 'totalVideoTimeMS': 784551, + 'postCount': 2 + }, + 'crosspost': { + 'loveCount': 221, + 'threePlusMinuteVideoCount': 2, + 'totalInteractionCount': 4937, + 'wowCount': 4, + 'thankfulCount': 0, + 'interactionRate': 0.06200319562661414, + 'likeCount': 2188, + 'hahaCount': 1563, + 'commentCount': 186, + 'shareCount': 771, + 'sadCount': 4, + 'angryCount': 0, + 'totalVideoTimeMS': 784551, + 'postCount': 2 + }, + 'link': { + 'shareCount': 7, + 'loveCount': 18, + 'totalInteractionCount': 225, + 'wowCount': 0, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.005652641416526816, + 'likeCount': 176, + 'hahaCount': 10, + 'commentCount': 14 + } + }, + 'subscriberData': { + 'initialCount': 3980440, + 'finalCount': 3980440 + } + }, { + 'account': { + 'id': 10336, + 'name': 'Los Angeles Times', + 'handle': 'latimes', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/42232523_10156934290463010_1867037960201830400_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkCUNuoErCZDtJ7bdvHC31FqbecsabwlNKsH2wceWr2Jv2z66F-V6pFJXzp8Z9dp5o&_nc_ht=scontent.xx&oh=9ac9785c391e9dbbdff486ee74ea0ac5&oe=5DF49524', + 'subscriberCount': 2755671, + 'url': 'https://www.facebook.com/5863113009', + 'platform': 'Facebook', + 'platformId': '5863113009', + 'verified': True + }, + 'summary': { + 'loveCount': 101, + 'totalInteractionCount': 4506, + 'wowCount': 223, + 'thankfulCount': 0, + 'interactionRate': 0.003520024037772398, + 'likeCount': 1519, + 'hahaCount': 235, + 'commentCount': 753, + 'shareCount': 674, + 'sadCount': 511, + 'angryCount': 490, + 'totalVideoTimeMS': 509063, + 'postCount': 46 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 17, + 'totalInteractionCount': 243, + 'wowCount': 7, + 'thankfulCount': 0, + 'interactionRate': 0.0012338228586006345, + 'likeCount': 124, + 'hahaCount': 5, + 'commentCount': 39, + 'shareCount': 24, + 'sadCount': 27, + 'angryCount': 0, + 'totalVideoTimeMS': 509063, + 'postCount': 7 + }, + 'owned_video': { + 'loveCount': 17, + 'totalInteractionCount': 243, + 'wowCount': 7, + 'thankfulCount': 0, + 'interactionRate': 0.0012338228586006345, + 'likeCount': 124, + 'hahaCount': 5, + 'commentCount': 39, + 'shareCount': 24, + 'sadCount': 27, + 'angryCount': 0, + 'totalVideoTimeMS': 509063, + 'postCount': 7 + }, + 'link': { + 'shareCount': 647, + 'loveCount': 83, + 'totalInteractionCount': 4246, + 'wowCount': 216, + 'sadCount': 
484, + 'angryCount': 490, + 'thankfulCount': 0, + 'postCount': 38, + 'interactionRate': 0.004028068744255013, + 'likeCount': 1383, + 'hahaCount': 230, + 'commentCount': 713 + }, + 'photo': { + 'shareCount': 3, + 'loveCount': 1, + 'totalInteractionCount': 17, + 'wowCount': 0, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.0006169114293003172, + 'likeCount': 12, + 'hahaCount': 0, + 'commentCount': 1 + } + }, + 'subscriberData': { + 'initialCount': 2755655, + 'finalCount': 2755671 + } + }, { + 'account': { + 'id': 115499, + 'name': 'Todd Starnes', + 'handle': 'ToddStarnesFNC', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46920198_2335144319893720_2455881966508048384_n.jpg?_nc_cat=109&_nc_oc=AQlVsotrCFkbQxUVhqk_a20GnVwI4eyMwE1FyELHA5EaL9-KdatZ3bMmtSFKeyQE0jk&_nc_ht=scontent.xx&oh=ece111e79c6914b50e86197d088ecff7&oe=5DFD2E1C', + 'subscriberCount': 265905, + 'url': 'https://www.facebook.com/128334087241432', + 'platform': 'Facebook', + 'platformId': '128334087241432', + 'verified': True + }, + 'summary': { + 'shareCount': 998, + 'loveCount': 250, + 'totalInteractionCount': 4169, + 'wowCount': 46, + 'sadCount': 2, + 'angryCount': 67, + 'thankfulCount': 0, + 'postCount': 4, + 'interactionRate': 0.3918854882020655, + 'likeCount': 2412, + 'hahaCount': 84, + 'commentCount': 310 + }, + 'breakdown': { + 'link': { + 'shareCount': 998, + 'loveCount': 250, + 'totalInteractionCount': 4169, + 'wowCount': 46, + 'sadCount': 2, + 'angryCount': 67, + 'thankfulCount': 0, + 'postCount': 4, + 'interactionRate': 0.3918854882020655, + 'likeCount': 2412, + 'hahaCount': 84, + 'commentCount': 310 + } + }, + 'subscriberData': { + 'initialCount': 265883, + 'finalCount': 265905 + } + }, { + 'account': { + 'id': 6895, + 'name': 'Hot Air', + 'handle': 'hotaircom', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1918240_103971746305177_1971637_n.jpg?_nc_cat=105&_nc_oc=AQmq3wr_U32ihNNbx8lpwEGbZjWoKbczxWgDl2LGkBuBl9j9-tyWq8Sc1J2iHA8jRWY&_nc_ht=scontent.xx&oh=86e1f495f452096fe58cf9743e825f16&oe=5E0A5E0D', + 'subscriberCount': 785694, + 'url': 'https://www.facebook.com/103971336305218', + 'platform': 'Facebook', + 'platformId': '103971336305218', + 'verified': True + }, + 'summary': { + 'shareCount': 599, + 'loveCount': 19, + 'totalInteractionCount': 4066, + 'wowCount': 39, + 'sadCount': 81, + 'angryCount': 765, + 'thankfulCount': 0, + 'postCount': 17, + 'interactionRate': 0.03041811545138447, + 'likeCount': 508, + 'hahaCount': 583, + 'commentCount': 1472 + }, + 'breakdown': { + 'link': { + 'shareCount': 599, + 'loveCount': 19, + 'totalInteractionCount': 4066, + 'wowCount': 39, + 'sadCount': 81, + 'angryCount': 765, + 'thankfulCount': 0, + 'postCount': 17, + 'interactionRate': 0.03041811545138447, + 'likeCount': 508, + 'hahaCount': 583, + 'commentCount': 1472 + } + }, + 'subscriberData': { + 'initialCount': 785738, + 'finalCount': 785694 + } + }, { + 'account': { + 'id': 8015, + 'name': 'ACLU', + 'handle': 'aclu', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22089288_10154767565091813_1188859475219690990_n.png?_nc_cat=1&_nc_oc=AQlOc1ngKYC3GfwIgJztREf3PH5Hd4YwSsZ4g2b5SwQaWmUWfBbb44yyZco-2DLasTQ&_nc_ht=scontent.xx&oh=a2e38ee9d88c35289b7dc25ecab038b1&oe=5E0C98FE', + 'subscriberCount': 2373681, + 'url': 'https://www.facebook.com/18982436812', + 'platform': 'Facebook', + 'platformId': '18982436812', + 'verified': True + }, + 'summary': { + 'shareCount': 577, + 'loveCount': 6, + 'totalInteractionCount': 4012, + 
'wowCount': 158, + 'sadCount': 495, + 'angryCount': 1855, + 'thankfulCount': 0, + 'postCount': 3, + 'interactionRate': 0.05632528293238606, + 'likeCount': 765, + 'hahaCount': 11, + 'commentCount': 145 + }, + 'breakdown': { + 'link': { + 'shareCount': 434, + 'loveCount': 6, + 'totalInteractionCount': 2894, + 'wowCount': 140, + 'sadCount': 385, + 'angryCount': 1140, + 'thankfulCount': 0, + 'postCount': 2, + 'interactionRate': 0.060959375021064056, + 'likeCount': 681, + 'hahaCount': 9, + 'commentCount': 99 + }, + 'photo': { + 'shareCount': 143, + 'loveCount': 0, + 'totalInteractionCount': 1118, + 'wowCount': 18, + 'sadCount': 110, + 'angryCount': 715, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 0.04709922686492717, + 'likeCount': 84, + 'hahaCount': 2, + 'commentCount': 46 + } + }, + 'subscriberData': { + 'initialCount': 2373743, + 'finalCount': 2373681 + } + }, { + 'account': { + 'id': 48821, + 'name': 'Crooks and Liars', + 'handle': 'crooksandliars.site', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/399460_10150942179577183_1849263528_n.jpg?_nc_cat=103&_nc_oc=AQnr-v08LlItth11GcPpklOwQqPVr0pcraHt2N9z8qAYugvNo-dOrF98XGKQSUDfBqA&_nc_ht=scontent.xx&oh=72192ff8f806d8bc882b8fd212075eb4&oe=5DFA36B2', + 'subscriberCount': 137575, + 'url': 'https://www.facebook.com/33455892182', + 'platform': 'Facebook', + 'platformId': '33455892182', + 'verified': False + }, + 'summary': { + 'shareCount': 1433, + 'loveCount': 39, + 'totalInteractionCount': 3697, + 'wowCount': 82, + 'sadCount': 38, + 'angryCount': 833, + 'thankfulCount': 0, + 'postCount': 14, + 'interactionRate': 0.19191416202934689, + 'likeCount': 647, + 'hahaCount': 240, + 'commentCount': 385 + }, + 'breakdown': { + 'link': { + 'shareCount': 1433, + 'loveCount': 39, + 'totalInteractionCount': 3697, + 'wowCount': 82, + 'sadCount': 38, + 'angryCount': 833, + 'thankfulCount': 0, + 'postCount': 14, + 'interactionRate': 0.19191416202934689, + 'likeCount': 647, + 'hahaCount': 240, + 'commentCount': 385 + } + }, + 'subscriberData': { + 'initialCount': 137548, + 'finalCount': 137575 + } + }, { + 'account': { + 'id': 18808, + 'name': 'All In with Chris Hayes', + 'handle': 'allinwithchris', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10547485_345099262321772_6121447368826167660_n.jpg?_nc_cat=104&_nc_oc=AQlozD0cFDb_FbzaOqCikk-LgnOKbPkWtpTgtdkKGtLsFEVkknXg-KX1hj_61rRWnoo&_nc_ht=scontent.xx&oh=28aa3954be984198f12f1c3c1959f089&oe=5E047CA4', + 'subscriberCount': 318658, + 'url': 'https://www.facebook.com/153005644864469', + 'platform': 'Facebook', + 'platformId': '153005644864469', + 'verified': True + }, + 'summary': { + 'shareCount': 362, + 'loveCount': 231, + 'totalInteractionCount': 3399, + 'wowCount': 66, + 'sadCount': 37, + 'angryCount': 174, + 'thankfulCount': 0, + 'postCount': 11, + 'interactionRate': 0.09697996845797142, + 'likeCount': 1609, + 'hahaCount': 241, + 'commentCount': 679 + }, + 'breakdown': { + 'link': { + 'shareCount': 362, + 'loveCount': 231, + 'totalInteractionCount': 3399, + 'wowCount': 66, + 'sadCount': 37, + 'angryCount': 174, + 'thankfulCount': 0, + 'postCount': 11, + 'interactionRate': 0.09697996845797142, + 'likeCount': 1609, + 'hahaCount': 241, + 'commentCount': 679 + } + }, + 'subscriberData': { + 'initialCount': 318587, + 'finalCount': 318658 + } + }, { + 'account': { + 'id': 6803, + 'name': 'ATTN:', + 'handle': 'attn', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/55489107_2197559426946171_5392793883551727616_n.png?_nc_cat=1&_nc_oc=AQlFVtxJFIGlgUkYcw60Zkg1TJJE0DfLcxAkL46yxLXEzD3fwboZzZWSZrZb7ZRK1A0&_nc_ht=scontent.xx&oh=b8584181fa979fe9be2ba7ccdf83ee91&oe=5DF5C3E9', + 'subscriberCount': 6115190, + 'url': 'https://www.facebook.com/160389977329803', + 'platform': 'Facebook', + 'platformId': '160389977329803', + 'verified': True + }, + 'summary': { + 'loveCount': 234, + 'totalInteractionCount': 3344, + 'wowCount': 43, + 'thankfulCount': 0, + 'interactionRate': 0.013670719942465237, + 'likeCount': 1581, + 'hahaCount': 77, + 'commentCount': 406, + 'shareCount': 906, + 'sadCount': 49, + 'angryCount': 48, + 'totalVideoTimeMS': 465869, + 'postCount': 4 + }, + 'breakdown': { + 'share': { + 'loveCount': 234, + 'totalInteractionCount': 3344, + 'wowCount': 43, + 'thankfulCount': 0, + 'interactionRate': 0.013670719942465237, + 'likeCount': 1581, + 'hahaCount': 77, + 'commentCount': 406, + 'shareCount': 906, + 'sadCount': 49, + 'angryCount': 48, + 'totalVideoTimeMS': 465869, + 'postCount': 4 + } + }, + 'subscriberData': { + 'initialCount': 6115329, + 'finalCount': 6115190 + } + }, { + 'account': { + 'id': 10870, + 'name': 'Ron Wyden', + 'handle': 'wyden', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12190982_10153365721817858_6948350790870886697_n.jpg?_nc_cat=100&_nc_oc=AQlSBDMPg1ns4cB8WiToj9dg65G_2i3lMHXfEYQsHz1nRK02sr0h5MYdH7To6P7NX90&_nc_ht=scontent.xx&oh=3cd24adf4b8435744c4d1a54cfb318df&oe=5DF9B0D9', + 'subscriberCount': 285891, + 'url': 'https://www.facebook.com/54787697857', + 'platform': 'Facebook', + 'platformId': '54787697857', + 'verified': True + }, + 'summary': { + 'shareCount': 374, + 'loveCount': 108, + 'totalInteractionCount': 3320, + 'wowCount': 12, + 'sadCount': 14, + 'angryCount': 192, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 1.1612533141190216, + 'likeCount': 2267, + 'hahaCount': 15, + 'commentCount': 338 + }, + 'breakdown': { + 'status': { + 'shareCount': 374, + 'loveCount': 108, + 'totalInteractionCount': 3320, + 'wowCount': 12, + 'sadCount': 14, + 'angryCount': 192, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 1.1612533141190216, + 'likeCount': 2267, + 'hahaCount': 15, + 'commentCount': 338 + } + }, + 'subscriberData': { + 'initialCount': 285905, + 'finalCount': 285891 + } + }, { + 'account': { + 'id': 7201, + 'name': 'Reason Magazine', + 'handle': 'Reason.Magazine', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/25507745_10155330003029117_4400239766346429931_n.png?_nc_cat=1&_nc_oc=AQnagqHyUqN5W0ZyyHCUfDHPUMXM_4ac8YRBQE669TgxJkv1DFyTfv73gsTrX6mUrgY&_nc_ht=scontent.xx&oh=6cbfe90b09ec3e8192ff64359dc59e20&oe=5E023167', + 'subscriberCount': 469507, + 'url': 'https://www.facebook.com/17548474116', + 'platform': 'Facebook', + 'platformId': '17548474116', + 'verified': True + }, + 'summary': { + 'loveCount': 87, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 3268, + 'wowCount': 23, + 'thankfulCount': 0, + 'interactionRate': 0.057934836089142974, + 'likeCount': 1670, + 'hahaCount': 156, + 'commentCount': 222, + 'shareCount': 1025, + 'sadCount': 49, + 'angryCount': 36, + 'totalVideoTimeMS': 2874551, + 'postCount': 12 + }, + 'breakdown': { + 'owned_video': { + 'loveCount': 43, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 1015, + 'wowCount': 6, + 'thankfulCount': 0, + 'interactionRate': 0.053887917391739606, + 'likeCount': 475, + 'hahaCount': 71, + 'commentCount': 62, + 'shareCount': 346, + 
'sadCount': 7, + 'angryCount': 5, + 'totalVideoTimeMS': 2874551, + 'postCount': 4 + }, + 'crosspost': { + 'loveCount': 43, + 'threePlusMinuteVideoCount': 3, + 'totalInteractionCount': 1015, + 'wowCount': 6, + 'thankfulCount': 0, + 'interactionRate': 0.053887917391739606, + 'likeCount': 475, + 'hahaCount': 71, + 'commentCount': 62, + 'shareCount': 346, + 'sadCount': 7, + 'angryCount': 5, + 'totalVideoTimeMS': 2874551, + 'postCount': 4 + }, + 'link': { + 'shareCount': 679, + 'loveCount': 44, + 'totalInteractionCount': 2253, + 'wowCount': 17, + 'sadCount': 42, + 'angryCount': 31, + 'thankfulCount': 0, + 'postCount': 8, + 'interactionRate': 0.059851797577386665, + 'likeCount': 1195, + 'hahaCount': 85, + 'commentCount': 160 + } + }, + 'subscriberData': { + 'initialCount': 469479, + 'finalCount': 469507 + } + }, { + 'account': { + 'id': 17948, + 'name': 'The Tonight Show Starring Jimmy Fallon', + 'handle': 'FallonTonight', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/40960938_10156796565408896_7404903529636167680_n.jpg?_nc_cat=1&_nc_oc=AQnoOgepqCwTPL1-ppqLUJBEQ7SLONcvBAudjquT4-cSinjzmGe4Nh8daL32nyC7tkA&_nc_ht=scontent.xx&oh=61dd49b423492addb96c27c27f7ba2eb&oe=5E0F8A8D', + 'subscriberCount': 14248140, + 'url': 'https://www.facebook.com/31732483895', + 'platform': 'Facebook', + 'platformId': '31732483895', + 'verified': True + }, + 'summary': { + 'loveCount': 248, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 3200, + 'wowCount': 186, + 'thankfulCount': 0, + 'interactionRate': 0.011229603311806535, + 'likeCount': 2038, + 'hahaCount': 242, + 'commentCount': 138, + 'shareCount': 338, + 'sadCount': 4, + 'angryCount': 6, + 'totalVideoTimeMS': 693138, + 'postCount': 2 + }, + 'breakdown': { + 'native_video': { + 'loveCount': 38, + 'totalInteractionCount': 537, + 'wowCount': 3, + 'thankfulCount': 0, + 'interactionRate': 0.003768935611525068, + 'likeCount': 283, + 'hahaCount': 123, + 'commentCount': 53, + 'shareCount': 30, + 'sadCount': 3, + 'angryCount': 4, + 'totalVideoTimeMS': 125459, + 'postCount': 1 + }, + 'owned_video': { + 'loveCount': 248, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 3200, + 'wowCount': 186, + 'thankfulCount': 0, + 'interactionRate': 0.011229603311806535, + 'likeCount': 2038, + 'hahaCount': 242, + 'commentCount': 138, + 'shareCount': 338, + 'sadCount': 4, + 'angryCount': 6, + 'totalVideoTimeMS': 693138, + 'postCount': 2 + }, + 'crosspost': { + 'loveCount': 210, + 'threePlusMinuteVideoCount': 1, + 'totalInteractionCount': 2663, + 'wowCount': 183, + 'thankfulCount': 0, + 'interactionRate': 0.018690271012088002, + 'likeCount': 1755, + 'hahaCount': 119, + 'commentCount': 85, + 'shareCount': 308, + 'sadCount': 1, + 'angryCount': 2, + 'totalVideoTimeMS': 567679, + 'postCount': 1 + } + }, + 'subscriberData': { + 'initialCount': 14247969, + 'finalCount': 14248140 + } + }, { + 'account': { + 'id': 46610, + 'name': 'Chris Murphy', + 'handle': 'ChrisMurphyCT', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16508448_10154689090443961_3586091731088933149_n.jpg?_nc_cat=105&_nc_oc=AQlvuUbIcdD9Dkn7lKNRqcM8WBn2ExJPinOnC5BOuFt2fYoeJ29C3gMzj5V7Var7l64&_nc_ht=scontent.xx&oh=32d0568a8ff3afbe50618dcda76dd79e&oe=5E01E210', + 'subscriberCount': 252316, + 'url': 'https://www.facebook.com/19437978960', + 'platform': 'Facebook', + 'platformId': '19437978960', + 'verified': True + }, + 'summary': { + 'shareCount': 419, + 'loveCount': 6, + 'totalInteractionCount': 3133, + 'wowCount': 26, + 'sadCount': 1243, + 'angryCount': 286, + 
'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 1.2416796957825298, + 'likeCount': 908, + 'hahaCount': 24, + 'commentCount': 221 + }, + 'breakdown': { + 'photo': { + 'shareCount': 419, + 'loveCount': 6, + 'totalInteractionCount': 3133, + 'wowCount': 26, + 'sadCount': 1243, + 'angryCount': 286, + 'thankfulCount': 0, + 'postCount': 1, + 'interactionRate': 1.2416796957825298, + 'likeCount': 908, + 'hahaCount': 24, + 'commentCount': 221 + } + }, + 'subscriberData': { + 'initialCount': 252323, + 'finalCount': 252316 + } + }, { + 'account': { + 'id': 370587, + 'name': 'Tea Party', + 'handle': 'teapartyorg', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10645152_1119529788064461_6831324369519464936_n.png?_nc_cat=109&_nc_oc=AQlYPwkxXVKsTPXKN2iEw6-kekm3w1t-TNKlGRez6lg5WNmUCadSHtPr1aKi7-vMXx0&_nc_ht=scontent.xx&oh=47cea2dd8d0821de871a1f427d4cc9c3&oe=5E051C4C', + 'subscriberCount': 416797, + 'url': 'https://www.facebook.com/172526489431467', + 'platform': 'Facebook', + 'platformId': '172526489431467', + 'verified': True + }, + 'summary': { + 'shareCount': 409, + 'loveCount': 35, + 'totalInteractionCount': 3120, + 'wowCount': 67, + 'sadCount': 29, + 'angryCount': 480, + 'thankfulCount': 0, + 'postCount': 39, + 'interactionRate': 0.01919339747126988, + 'likeCount': 520, + 'hahaCount': 495, + 'commentCount': 1085 + }, + 'breakdown': { + 'link': { + 'shareCount': 409, + 'loveCount': 35, + 'totalInteractionCount': 3120, + 'wowCount': 67, + 'sadCount': 29, + 'angryCount': 480, + 'thankfulCount': 0, + 'postCount': 39, + 'interactionRate': 0.01919339747126988, + 'likeCount': 520, + 'hahaCount': 495, + 'commentCount': 1085 + } + }, + 'subscriberData': { + 'initialCount': 416823, + 'finalCount': 416797 + } + }, { + 'account': { + 'id': 4007, + 'name': 'Truthout', + 'handle': 'truthout', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/19894613_10154795655481094_2383393652303893841_n.jpg?_nc_cat=110&_nc_oc=AQkTbiRpAD3hZBOdyzgT1PgAhh4VQwvgi7_UrWwWRSAE_kE9X6Vo3lxn9jEYjOQ71yY&_nc_ht=scontent.xx&oh=a8046506973cb0fdb4deab6119ed03f5&oe=5DF988DD', + 'subscriberCount': 754194, + 'url': 'https://www.facebook.com/83865976093', + 'platform': 'Facebook', + 'platformId': '83865976093', + 'verified': True + }, + 'summary': { + 'shareCount': 919, + 'loveCount': 100, + 'totalInteractionCount': 3082, + 'wowCount': 97, + 'sadCount': 58, + 'angryCount': 644, + 'thankfulCount': 0, + 'postCount': 7, + 'interactionRate': 0.058339614786175366, + 'likeCount': 980, + 'hahaCount': 86, + 'commentCount': 198 + }, + 'breakdown': { + 'link': { + 'shareCount': 919, + 'loveCount': 100, + 'totalInteractionCount': 3082, + 'wowCount': 97, + 'sadCount': 58, + 'angryCount': 644, + 'thankfulCount': 0, + 'postCount': 7, + 'interactionRate': 0.058339614786175366, + 'likeCount': 980, + 'hahaCount': 86, + 'commentCount': 198 + } + }, + 'subscriberData': { + 'initialCount': 754215, + 'finalCount': 754194 + } + }, { + 'account': { + 'id': 10662, + 'name': 'Senator Chuck Schumer', + 'handle': 'senschumer', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c53.0.200.200a/p200x200/484164_10151474167894407_1415959450_n.png?_nc_cat=110&_nc_oc=AQmwM-QX_xbE0MV5qDszLzLb9AfcHcmbiFLZejE1imfSOMx9OawOnk_4NRNjQN6BQko&_nc_ht=scontent.xx&oh=b4eb0adf1c1c4d46d1933f6f4a5ffd87&oe=5DFB0E16', + 'subscriberCount': 376381, + 'url': 'https://www.facebook.com/15771239406', + 'platform': 'Facebook', + 'platformId': '15771239406', + 'verified': True + }, + 'summary': { + 'loveCount': 12, + 'totalInteractionCount': 3032, + 
'wowCount': 61,
+ 'thankfulCount': 0,
+ 'interactionRate': 0.40277853424215326,
+ 'likeCount': 712,
+ 'hahaCount': 364,
+ 'commentCount': 805,
+ 'shareCount': 579,
+ 'sadCount': 37,
+ 'angryCount': 462,
+ 'totalVideoTimeMS': 122878,
+ 'postCount': 2
+ },
+ 'breakdown': {
+ 'native_video': {
+ 'loveCount': 5,
+ 'totalInteractionCount': 1405,
+ 'wowCount': 17,
+ 'thankfulCount': 0,
+ 'interactionRate': 0.3732874938062173,
+ 'likeCount': 300,
+ 'hahaCount': 78,
+ 'commentCount': 375,
+ 'shareCount': 352,
+ 'sadCount': 27,
+ 'angryCount': 251,
+ 'totalVideoTimeMS': 122878,
+ 'postCount': 1
+ },
+ 'owned_video': {
+ 'loveCount': 5,
+ 'totalInteractionCount': 1405,
+ 'wowCount': 17,
+ 'thankfulCount': 0,
+ 'interactionRate': 0.3732874938062173,
+ 'likeCount': 300,
+ 'hahaCount': 78,
+ 'commentCount': 375,
+ 'shareCount': 352,
+ 'sadCount': 27,
+ 'angryCount': 251,
+ 'totalVideoTimeMS': 122878,
+ 'postCount': 1
+ },
+ 'link': {
+ 'shareCount': 227,
+ 'loveCount': 7,
+ 'totalInteractionCount': 1627,
+ 'wowCount': 44,
+ 'sadCount': 10,
+ 'angryCount': 211,
+ 'thankfulCount': 0,
+ 'postCount': 1,
+ 'interactionRate': 0.43226957467808935,
+ 'likeCount': 412,
+ 'hahaCount': 286,
+ 'commentCount': 430
+ }
+ },
+ 'subscriberData': {
+ 'initialCount': 376390,
+ 'finalCount': 376381
+ }
+ }, {
+ 'account': {
+ 'id': 9840,
+ 'name': 'CommonDreams',
+ 'handle': 'commondreams.org',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10469767_10152172973972016_8063428021861554001_n.jpg?_nc_cat=103&_nc_oc=AQlnGcnqptQQZZC2ssw_mVUJY3OL5CzMA_2hp5GQtIg_0HMCwMmn9q28KrUoNRmgbtU&_nc_ht=scontent.xx&oh=71d9a7a103ab2f477e27840eb92ac030&oe=5E040D0C',
+ 'subscriberCount': 366664,
+ 'url': 'https://www.facebook.com/32109457015',
+ 'platform': 'Facebook',
+ 'platformId': '32109457015',
+ 'verified': True
+ },
+ 'summary': {
+ 'shareCount': 1051,
+ 'loveCount': 177,
+ 'totalInteractionCount': 3025,
+ 'wowCount': 60,
+ 'sadCount': 40,
+ 'angryCount': 591,
+ 'thankfulCount': 0,
+ 'postCount': 8,
+ 'interactionRate': 0.10309095595043452,
+ 'likeCount': 923,
+ 'hahaCount': 23,
+ 'commentCount': 160
+ },
+ 'breakdown': {
+ 'link': {
+ 'shareCount': 1051,
+ 'loveCount': 177,
+ 'totalInteractionCount': 3025,
+ 'wowCount': 60,
+ 'sadCount': 40,
+ 'angryCount': 591,
+ 'thankfulCount': 0,
+ 'postCount': 8,
+ 'interactionRate': 0.10309095595043452,
+ 'likeCount': 923,
+ 'hahaCount': 23,
+ 'commentCount': 160
+ }
+ },
+ 'subscriberData': {
+ 'initialCount': 366669,
+ 'finalCount': 366664
+ }
+ }],
+ 'pagination': {
+ }
+ }
 }
diff --git a/test/test_crowdtangle/link_post.py b/test/test_crowdtangle/link_post.py
index d1ee893766..c9580b36a8 100644
--- a/test/test_crowdtangle/link_post.py
+++ b/test/test_crowdtangle/link_post.py
@@ -1,749 +1,687 @@
 # flake8: noqa
 expected_post = {
- "status": 200,
- "result": {
- "posts": [
- {
- "id": 70175022660,
- "platformId": "155869377766434_3572995539387117",
- "platform": "Facebook",
- "date": "2019-09-07 23:57:09",
- "updated": "2019-09-08 01:29:50",
- "type": "link",
- "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US",
- "caption": "nbcnews.com",
- "description": " ",
- "message": "BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://nbcnews.to/34stfC2", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio", - }, - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio", - }, - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/NBCNews/posts/3572995539387117", - "subscriberCount": 9970622, - "score": 28.904564315352697, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 532, - "shareCount": 1380, - "commentCount": 2021, - "loveCount": 19, - "wowCount": 329, - "hahaCount": 1764, - "sadCount": 71, - "angryCount": 850, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 54, - "shareCount": 54, - "commentCount": 42, - "loveCount": 5, - "wowCount": 18, - "hahaCount": 6, - "sadCount": 46, - "angryCount": 16, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13889, - "name": "NBC News", - "handle": "NBCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653", - "subscriberCount": 9970540, - "url": "https://www.facebook.com/155869377766434", - "platform": "Facebook", - "platformId": "155869377766434", - "verified": True, - }, - }, - { - "id": 70177674731, - "platformId": "170776332994065_2998688290202841", - "platform": "Facebook", - "date": "2019-09-08 01:12:19", - "updated": "2019-09-08 01:29:59", - "type": "link", - "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "message": "In conversations with NBC News, U.S. 
officials, foreign diplomats briefed on the discussions, Afghan officials and Taliban representatives had given no indication in recent days that the Trump administration had plans to invite the group to Camp David for a meeting with the president.", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - } - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/ExposingFacts/posts/2998688290202841", - "subscriberCount": 66830, - "score": 1.6666666666666667, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 5, - "shareCount": 13, - "commentCount": 11, - "loveCount": 0, - "wowCount": 0, - "hahaCount": 27, - "sadCount": 1, - "angryCount": 8, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 6, - "shareCount": 8, - "commentCount": 5, - "loveCount": 2, - "wowCount": 2, - "hahaCount": 7, - "sadCount": 2, - "angryCount": 7, - "thankfulCount": 0, - }, - }, - "account": { - "id": 5481160, - "name": "Exposing Facts to the Misinformed Viewers of Fox News", - "handle": "ExposingFacts", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/29791466_1891625647575783_5327369811012051734_n.png?_nc_cat=103&_nc_oc=AQmRjv5awn3jrYTRpn66YTk-NhU0IvncCRP7Iqj2mG5ZyNmwvmdlDez45CE2ee2usHc&_nc_ht=scontent.xx&oh=23653b13f9c687f292b2b1fab40cae33&oe=5E0B9ECC", - "subscriberCount": 66830, - "url": "https://www.facebook.com/170776332994065", - "platform": "Facebook", - "platformId": "170776332994065", - "verified": False, - }, - }, - { - "id": 70176283081, - "platformId": "479182798944301_1177890119073562", - "platform": "Facebook", - "date": "2019-09-08 00:32:44", - "updated": "2019-09-08 01:29:32", - "type": "link", - "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - } - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/KaiReedWBAL/posts/1177890119073562", - "subscriberCount": 1105, - "score": -26.0, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 0, - "shareCount": 0, - "commentCount": 0, - "loveCount": 0, - "wowCount": 0, - "hahaCount": 0, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 2, - "shareCount": 1, - "commentCount": 2, - "loveCount": 1, - "wowCount": 1, - "hahaCount": 2, - "sadCount": 2, - "angryCount": 2, - "thankfulCount": 0, - }, - }, - "account": { - "id": 1410256, - "name": "Kai Reed", - "handle": "KaiReedWBAL", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49900434_1024683774394198_3651308069498912768_n.jpg?_nc_cat=103&_nc_oc=AQnYzq2mAMzEDxyoeHe7Au6tN0lL0_0T9c7xnQe0TOj8JaBsiDVJsg1cSQK6VsuoUSc&_nc_ht=scontent.xx&oh=6908847b5171acc795a630585e7fcc44&oe=5DFBE356", - "subscriberCount": 1105, - "url": "https://www.facebook.com/479182798944301", - "platform": "Facebook", - "platformId": "479182798944301", - "verified": True, - }, - }, - { - "id": 70176242553, - "platformId": "500484703494525_1200896563453332", - "platform": "Facebook", - "date": "2019-09-08 00:32:13", - "updated": "2019-09-08 01:29:01", - "type": "link", - "title": "NBC News", - "description": "BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://nbcnews.to/34stfC2", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/OKCDemocraticSocialists/posts/1200896563453332", - "subscriberCount": 2122, - "score": -2.75, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 0, - "shareCount": 2, - "commentCount": 1, - "loveCount": 0, - "wowCount": 1, - "hahaCount": 4, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 3, - "shareCount": 3, - "commentCount": 2, - "loveCount": 2, - "wowCount": 2, - "hahaCount": 3, - "sadCount": 3, - "angryCount": 4, - "thankfulCount": 0, - }, - }, - "account": { - "id": 3235562, - "name": "Oklahoma City Democratic Socialists", - "handle": "OKCDemocraticSocialists", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21271017_746409325568727_7908860970547257116_n.jpg?_nc_cat=110&_nc_oc=AQntHEOTiUw_Bfs60fX1JidxuFmWwr2ycwtOckLWKScy-NkHpoMeIYhFJ14tvQUvvgg&_nc_ht=scontent.xx&oh=0bbc6037fad5bd3f601e780e58087d5a&oe=5DFF3476", - "subscriberCount": 2122, - "url": "https://www.facebook.com/500484703494525", - "platform": "Facebook", - "platformId": "500484703494525", - "verified": False, - }, - }, - { - "id": 70176142197, - "platformId": "263193710426380_2618804768198584", - "platform": "Facebook", - "date": "2019-09-08 00:24:36", - "updated": "2019-09-08 01:43:54", - "type": "link", - "title": "NBC News", - "description": "BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://nbcnews.to/34stfC2", - "message": "Wrap your mind around it. The President is claiming he was going to host the leaders of the Taliban, on US soil, days before the anniversary of 9/11. But then he decided not to... After they killed people. Recently.", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/elizabethjgrattan/posts/2618804768198584", - "subscriberCount": 2777, - "score": -1.65, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 1, - "shareCount": 3, - "commentCount": 3, - "loveCount": 0, - "wowCount": 1, - "hahaCount": 3, - "sadCount": 0, - "angryCount": 9, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 4, - "shareCount": 4, - "commentCount": 2, - "loveCount": 3, - "wowCount": 3, - "hahaCount": 3, - "sadCount": 4, - "angryCount": 10, - "thankfulCount": 0, - }, - }, - "account": { - "id": 1403672, - "name": "Elizabeth Grattan", - "handle": "elizabethjgrattan", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c0.0.200.200a/p200x200/10462787_693047747440972_8859242313743021660_n.jpg?_nc_cat=105&_nc_oc=AQnFSB9IuJSeRfYCTz3LAiVrfwQ0uI8MYqdF6IOdvr0nEaUr7zaT1-u4nmJgdqjvAro&_nc_ht=scontent.xx&oh=21969263d4e6dc6fe13b3a8b82987826&oe=5E0B4493", - "subscriberCount": 2777, - "url": "https://www.facebook.com/263193710426380", - "platform": "Facebook", - "platformId": "263193710426380", - "verified": True, - }, - }, - { - "id": 70176839156, - "platformId": "10157226754140781", - "platform": "Facebook", - "date": "2019-09-08 00:22:18", - "updated": "2019-09-08 01:00:17", - "type": "link", - "title": "Trump says he was set to hold 
secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "message": "Yeahhhhhhhhhhhhh right", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://nbcnews.to/34stfC2", - } - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/IamLarryLynn/posts/10157226754140781", - "subscriberCount": 602, - "score": -16.0, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 0, - "shareCount": 0, - "commentCount": 0, - "loveCount": 0, - "wowCount": 0, - "hahaCount": 0, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 2, - "shareCount": 2, - "commentCount": 0, - "loveCount": 1, - "wowCount": 1, - "hahaCount": 2, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - }, - "account": { - "id": 3540509, - "name": "Larry Lynn", - "handle": "IamLarryLynn", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p50x50/65394340_10157053905880781_4941448582302531584_n.jpg?_nc_cat=103&_nc_oc=AQmhMBnI-wAvbKRMyeLaqGP-KVVfwzzy6GNaBz_oXg5xb8PThygdYvI0aWpuFQcpFoU&_nc_ht=scontent.xx&oh=e2d2f09c65f63c95fb1e481c7c2dca80&oe=5E082A18", - "subscriberCount": 602, - "url": "https://facebook.com/IamLarryLynn", - "platform": "Facebook", - "verified": True, - }, - }, - { - "id": 70176114921, - "platformId": "168628206619310_1402037439945041", - "platform": "Facebook", - "date": "2019-09-08 00:19:38", - "updated": "2019-09-08 01:43:14", - "type": "link", - "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - } - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/FortWaynesNBC/posts/1402037439945041", - "subscriberCount": 30704, - "score": -1.5277777777777777, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 5, - "shareCount": 2, - "commentCount": 16, - "loveCount": 0, - "wowCount": 3, - "hahaCount": 3, - "sadCount": 0, - "angryCount": 7, - "thankfulCount": 0, - }, - 
"expected": { - "likeCount": 6, - "shareCount": 10, - "commentCount": 5, - "loveCount": 6, - "wowCount": 4, - "hahaCount": 5, - "sadCount": 13, - "angryCount": 6, - "thankfulCount": 0, - }, - }, - "account": { - "id": 42773, - "name": "Fort Wayne's NBC", - "handle": "FortWaynesNBC", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28378676_979036218911834_2561844683055476141_n.jpg?_nc_cat=106&_nc_oc=AQkM5DawRxB5n4e28sgL8kFjsikj05ODttgQEogM71K0e4XE7G4UGS-VYE5c7O6wb-A&_nc_ht=scontent.xx&oh=400980bca410ff7aa7636d5a2da5720a&oe=5E14EDF2", - "subscriberCount": 30704, - "url": "https://www.facebook.com/168628206619310", - "platform": "Facebook", - "platformId": "168628206619310", - "verified": True, - }, - }, - { - "id": 70175930004, - "platformId": "1347071985421819_2210227459106263", - "platform": "Facebook", - "date": "2019-09-08 00:15:13", - "updated": "2019-09-08 01:39:40", - "type": "link", - "title": "NBC News", - "description": "BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://nbcnews.to/34stfC2", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/TiffanyLaneKSN/posts/2210227459106263", - "subscriberCount": 1652, - "score": -3.5, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 4, - "shareCount": 0, - "commentCount": 0, - "loveCount": 0, - "wowCount": 1, - "hahaCount": 1, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 4, - "shareCount": 3, - "commentCount": 2, - "loveCount": 2, - "wowCount": 2, - "hahaCount": 3, - "sadCount": 3, - "angryCount": 2, - "thankfulCount": 0, - }, - }, - "account": { - "id": 2619277, - "name": "Tiffany Lane KSN", - "handle": "TiffanyLaneKSN", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/23561789_1347072212088463_2246668714920855149_n.jpg?_nc_cat=107&_nc_oc=AQnZDnhp7-uirbeBJFPQ7QsI-fi8APj-_SttxiD62881HSe3FPdah5OxNb8BYC95JzM&_nc_ht=scontent.xx&oh=f7dc140f09610a3ccb91b5e1289961e8&oe=5E0EFD45", - "subscriberCount": 1652, - "url": "https://www.facebook.com/1347071985421819", - "platform": "Facebook", - "platformId": "1347071985421819", - "verified": False, - }, - }, - { - "id": 70175692433, - "platformId": "1859483880770155_2628920380493164", - "platform": "Facebook", - "date": "2019-09-08 00:13:03", - "updated": "2019-09-08 
01:21:18", - "type": "link", - "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "message": "I can't quit laughing! The comments are great!!! https://www.facebook.com/155869377766434/posts/3572995539387117/", - "expandedLinks": [ - { - "original": "https://www.facebook.com/155869377766434/posts/3572995539387117/", - "expanded": "https://www.facebook.com/155869377766434/posts/3572995539387117/", - }, - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://nbcnews.to/34stfC2", - }, - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/groups/1859483880770155/permalink/2628920380493164", - "subscriberCount": 10065, - "score": 4.0, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 8, - "shareCount": 34, - "commentCount": 42, - "loveCount": 0, - "wowCount": 8, - "hahaCount": 58, - "sadCount": 0, - "angryCount": 14, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 5, - "shareCount": 5, - "commentCount": 5, - "loveCount": 5, - "wowCount": 2, - "hahaCount": 7, - "sadCount": 2, - "angryCount": 10, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6610541, - "name": "The “Original” Funny Trump Memes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-0/c19.0.50.50a/p50x50/67884429_10157587566374138_1513236713479077888_n.jpg?_nc_cat=108&_nc_oc=AQl4RDR-0KEX3GfZtivxGGD5_XY0BPsp188WHgMd7pUHgFAZYsS19F8N1H8CV-xKqck&_nc_ht=scontent.xx&oh=a6c7de50f5e44eb097047c7b1c9f1937&oe=5E141DC6", - "subscriberCount": 10066, - "url": "https://www.facebook.com/1859483880770155", - "platform": "Facebook", - "platformId": "1859483880770155", - "verified": False, - }, - }, - { - "id": 70175484410, - "platformId": "371655802904606_2993321680737992", - "platform": "Facebook", - "date": "2019-09-08 00:09:38", - "updated": "2019-09-08 01:34:13", - "type": "link", - "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "message": '"Trump says" is not how you lead a story with his track record of 10,000+ lies and counting. You need to fact check. Do you have an independent source confirming? That is how journalism USED to work. Otherwise I refer you to the Narcissist playbook: create a situation, say you solved it. This is nothing but an attempt to redirect the media, successful since NBC just posted it, away from his other gaffs this week. This is nothing but an attempt to "be presidential" and "show strength" when there is no confirmation that such a meeting was ever discussed, let alone planned. But he cancelled it, yeah, because he is in charge. Yup, nothing to see here. 
Would you also run a story saying he was going to meet with Santa Claus to talk about a visit to the White House in December, but had to cancel for an important trip to meet Kim in North Korea? (HINT: Santa isn\'t real) Narcissism-101, and the media is falling for it by LEADING with his nonsense without ANY 3rd party confirmation that the Afghan President even was aware of any of this, without any 3rd party confirmation that anyone in our government, not beholden to Trump, knew about this. Journalism is now about sensational headlines, and revenue generating clicks, and not about confirming their source before publishing. SMH', - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0eanqLCSl20VXhRTl9NGouEiSmN6iIzjBnbNX4qr8nrtyjKxKz3dQPAKE", - } - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/LiberalIdentity/posts/2993321680737992", - "subscriberCount": 109406, - "score": 4.122222222222222, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 72, - "shareCount": 64, - "commentCount": 58, - "loveCount": 2, - "wowCount": 13, - "hahaCount": 102, - "sadCount": 8, - "angryCount": 52, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 15, - "shareCount": 26, - "commentCount": 8, - "loveCount": 4, - "wowCount": 4, - "hahaCount": 6, - "sadCount": 5, - "angryCount": 22, - "thankfulCount": 0, - }, - }, - "account": { - "id": 1643177, - "name": "Liberal Identity", - "handle": "LiberalIdentity", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1966670_616816825055168_94398373_n.jpg?_nc_cat=1&_nc_oc=AQlkU5jU0IAMIEM90d7BwalNT6XZ1Ao-s20pwA0_LEf6H6R8aucZVDdN_1T6qM-rLUI&_nc_ht=scontent.xx&oh=5919477490e2a1c5b3bae1dc0f711c2f&oe=5E0CEE2D", - "subscriberCount": 109454, - "url": "https://www.facebook.com/371655802904606", - "platform": "Facebook", - "platformId": "371655802904606", - "verified": False, - }, - }, - { - "id": 70175463296, - "platformId": "379673542547579_690041071510823", - "platform": "Facebook", - "date": "2019-09-08 00:00:44", - "updated": "2019-09-08 01:34:00", - "type": "link", - "title": "NBC News", - "description": "BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://nbcnews.to/34stfC2", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma", - }, - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/CiaraEncinasKYMA/posts/690041071510823", - "subscriberCount": 1809, - "score": -3.8333333333333335, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 3, - "shareCount": 0, - "commentCount": 1, - "loveCount": 0, - "wowCount": 0, - "hahaCount": 1, - "sadCount": 0, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 4, - "shareCount": 4, - "commentCount": 2, - "loveCount": 2, - "wowCount": 3, - "hahaCount": 2, - "sadCount": 3, - "angryCount": 3, - "thankfulCount": 0, - }, - }, - "account": { - "id": 3409486, - "name": "Ciara Encinas KYMA", - "handle": "CiaraEncinasKYMA", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/42898860_493391077842491_5671079035296284672_n.jpg?_nc_cat=110&_nc_oc=AQnUR3LFc1pjjn762jb-ZaxbLszVU-b9emCjFTv7YaZ6fhk3ukXZcjFTDQae40d56qs&_nc_ht=scontent.xx&oh=3ea3988506d4fb15828d853e4c50e154&oe=5DF480EC", - "subscriberCount": 1809, - "url": "https://www.facebook.com/379673542547579", - "platform": "Facebook", - "platformId": "379673542547579", - "verified": False, - }, - }, - ], - "pagination": {}, - }, + 'status': 200, + 'result': { + 'posts': [{ + 'id': 70175022660, + 'platformId': '155869377766434_3572995539387117', + 'platform': 'Facebook', + 'date': '2019-09-07 23:57:09', + 'updated': '2019-09-08 01:29:50', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://nbcnews.to/34stfC2', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio' + }, { + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/NBCNews/posts/3572995539387117', + 'subscriberCount': 9970622, + 'score': 28.904564315352697, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 532, + 'shareCount': 1380, + 'commentCount': 2021, + 'loveCount': 19, + 'wowCount': 329, + 'hahaCount': 1764, + 'sadCount': 71, + 'angryCount': 850, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 54, + 'shareCount': 54, + 'commentCount': 42, + 'loveCount': 5, + 'wowCount': 18, + 'hahaCount': 6, + 'sadCount': 46, + 'angryCount': 16, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13889, + 'name': 'NBC News', + 'handle': 'NBCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653', + 'subscriberCount': 9970540, + 'url': 'https://www.facebook.com/155869377766434', + 'platform': 'Facebook', + 'platformId': '155869377766434', + 'verified': True + } + }, { + 'id': 70177674731, + 'platformId': '170776332994065_2998688290202841', + 'platform': 'Facebook', + 'date': '2019-09-08 01:12:19', + 'updated': '2019-09-08 01:29:59', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'In conversations with NBC News, U.S. 
officials, foreign diplomats briefed on the discussions, Afghan officials and Taliban representatives had given no indication in recent days that the Trump administration had plans to invite the group to Camp David for a meeting with the president.', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/ExposingFacts/posts/2998688290202841', + 'subscriberCount': 66830, + 'score': 1.6666666666666667, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 5, + 'shareCount': 13, + 'commentCount': 11, + 'loveCount': 0, + 'wowCount': 0, + 'hahaCount': 27, + 'sadCount': 1, + 'angryCount': 8, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 6, + 'shareCount': 8, + 'commentCount': 5, + 'loveCount': 2, + 'wowCount': 2, + 'hahaCount': 7, + 'sadCount': 2, + 'angryCount': 7, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 5481160, + 'name': 'Exposing Facts to the Misinformed Viewers of Fox News', + 'handle': 'ExposingFacts', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/29791466_1891625647575783_5327369811012051734_n.png?_nc_cat=103&_nc_oc=AQmRjv5awn3jrYTRpn66YTk-NhU0IvncCRP7Iqj2mG5ZyNmwvmdlDez45CE2ee2usHc&_nc_ht=scontent.xx&oh=23653b13f9c687f292b2b1fab40cae33&oe=5E0B9ECC', + 'subscriberCount': 66830, + 'url': 'https://www.facebook.com/170776332994065', + 'platform': 'Facebook', + 'platformId': '170776332994065', + 'verified': False + } + }, { + 'id': 70176283081, + 'platformId': '479182798944301_1177890119073562', + 'platform': 'Facebook', + 'date': '2019-09-08 00:32:44', + 'updated': '2019-09-08 01:29:32', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/KaiReedWBAL/posts/1177890119073562', + 'subscriberCount': 1105, + 'score': -26.0, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 0, + 'shareCount': 0, + 'commentCount': 0, + 'loveCount': 0, + 'wowCount': 0, + 'hahaCount': 0, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 2, + 'shareCount': 1, + 'commentCount': 2, + 'loveCount': 1, + 'wowCount': 1, + 'hahaCount': 2, + 'sadCount': 2, + 'angryCount': 2, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 1410256, + 'name': 'Kai Reed', + 'handle': 'KaiReedWBAL', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49900434_1024683774394198_3651308069498912768_n.jpg?_nc_cat=103&_nc_oc=AQnYzq2mAMzEDxyoeHe7Au6tN0lL0_0T9c7xnQe0TOj8JaBsiDVJsg1cSQK6VsuoUSc&_nc_ht=scontent.xx&oh=6908847b5171acc795a630585e7fcc44&oe=5DFBE356', + 'subscriberCount': 1105, + 'url': 'https://www.facebook.com/479182798944301', + 'platform': 'Facebook', + 'platformId': '479182798944301', + 'verified': True + } + }, { + 'id': 70176242553, + 'platformId': '500484703494525_1200896563453332', + 'platform': 'Facebook', + 'date': '2019-09-08 00:32:13', + 'updated': '2019-09-08 01:29:01', + 'type': 'link', + 'title': 'NBC News', + 'description': 'BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://nbcnews.to/34stfC2', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }, { + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/OKCDemocraticSocialists/posts/1200896563453332', + 'subscriberCount': 2122, + 'score': -2.75, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 0, + 'shareCount': 2, + 'commentCount': 1, + 'loveCount': 0, + 'wowCount': 1, + 'hahaCount': 4, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 3, + 'shareCount': 3, + 'commentCount': 2, + 'loveCount': 2, + 'wowCount': 2, + 'hahaCount': 3, + 'sadCount': 3, + 'angryCount': 4, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 3235562, + 'name': 'Oklahoma City Democratic Socialists', + 'handle': 'OKCDemocraticSocialists', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21271017_746409325568727_7908860970547257116_n.jpg?_nc_cat=110&_nc_oc=AQntHEOTiUw_Bfs60fX1JidxuFmWwr2ycwtOckLWKScy-NkHpoMeIYhFJ14tvQUvvgg&_nc_ht=scontent.xx&oh=0bbc6037fad5bd3f601e780e58087d5a&oe=5DFF3476', + 'subscriberCount': 2122, + 'url': 'https://www.facebook.com/500484703494525', + 'platform': 'Facebook', + 'platformId': '500484703494525', + 'verified': False + } + }, { + 'id': 70176142197, + 'platformId': '263193710426380_2618804768198584', + 'platform': 'Facebook', + 'date': '2019-09-08 00:24:36', + 'updated': '2019-09-08 01:43:54', + 'type': 'link', + 'title': 'NBC News', + 'description': 'BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://nbcnews.to/34stfC2', + 'message': 'Wrap your mind around it. The President is claiming he was going to host the leaders of the Taliban, on US soil, days before the anniversary of 9/11. But then he decided not to... After they killed people. Recently.', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }, { + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/elizabethjgrattan/posts/2618804768198584', + 'subscriberCount': 2777, + 'score': -1.65, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 1, + 'shareCount': 3, + 'commentCount': 3, + 'loveCount': 0, + 'wowCount': 1, + 'hahaCount': 3, + 'sadCount': 0, + 'angryCount': 9, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 4, + 'shareCount': 4, + 'commentCount': 2, + 'loveCount': 3, + 'wowCount': 3, + 'hahaCount': 3, + 'sadCount': 4, + 'angryCount': 10, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 1403672, + 'name': 'Elizabeth Grattan', + 'handle': 'elizabethjgrattan', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c0.0.200.200a/p200x200/10462787_693047747440972_8859242313743021660_n.jpg?_nc_cat=105&_nc_oc=AQnFSB9IuJSeRfYCTz3LAiVrfwQ0uI8MYqdF6IOdvr0nEaUr7zaT1-u4nmJgdqjvAro&_nc_ht=scontent.xx&oh=21969263d4e6dc6fe13b3a8b82987826&oe=5E0B4493', + 'subscriberCount': 2777, + 'url': 'https://www.facebook.com/263193710426380', + 'platform': 'Facebook', + 'platformId': '263193710426380', + 'verified': True + } + }, { + 'id': 70176839156, + 'platformId': '10157226754140781', + 'platform': 'Facebook', + 'date': '2019-09-08 00:22:18', + 'updated': '2019-09-08 01:00:17', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp 
David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'Yeahhhhhhhhhhhhh right', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://nbcnews.to/34stfC2' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/IamLarryLynn/posts/10157226754140781', + 'subscriberCount': 602, + 'score': -16.0, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 0, + 'shareCount': 0, + 'commentCount': 0, + 'loveCount': 0, + 'wowCount': 0, + 'hahaCount': 0, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 2, + 'shareCount': 2, + 'commentCount': 0, + 'loveCount': 1, + 'wowCount': 1, + 'hahaCount': 2, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 3540509, + 'name': 'Larry Lynn', + 'handle': 'IamLarryLynn', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p50x50/65394340_10157053905880781_4941448582302531584_n.jpg?_nc_cat=103&_nc_oc=AQmhMBnI-wAvbKRMyeLaqGP-KVVfwzzy6GNaBz_oXg5xb8PThygdYvI0aWpuFQcpFoU&_nc_ht=scontent.xx&oh=e2d2f09c65f63c95fb1e481c7c2dca80&oe=5E082A18', + 'subscriberCount': 602, + 'url': 'https://facebook.com/IamLarryLynn', + 'platform': 'Facebook', + 'verified': True + } + }, { + 'id': 70176114921, + 'platformId': '168628206619310_1402037439945041', + 'platform': 'Facebook', + 'date': '2019-09-08 00:19:38', + 'updated': '2019-09-08 01:43:14', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/FortWaynesNBC/posts/1402037439945041', + 'subscriberCount': 30704, + 'score': -1.5277777777777777, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 5, + 'shareCount': 2, + 'commentCount': 16, + 'loveCount': 0, + 'wowCount': 3, + 'hahaCount': 3, + 'sadCount': 0, + 'angryCount': 7, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 6, + 'shareCount': 10, + 'commentCount': 5, 
+ 'loveCount': 6, + 'wowCount': 4, + 'hahaCount': 5, + 'sadCount': 13, + 'angryCount': 6, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 42773, + 'name': "Fort Wayne's NBC", + 'handle': 'FortWaynesNBC', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28378676_979036218911834_2561844683055476141_n.jpg?_nc_cat=106&_nc_oc=AQkM5DawRxB5n4e28sgL8kFjsikj05ODttgQEogM71K0e4XE7G4UGS-VYE5c7O6wb-A&_nc_ht=scontent.xx&oh=400980bca410ff7aa7636d5a2da5720a&oe=5E14EDF2', + 'subscriberCount': 30704, + 'url': 'https://www.facebook.com/168628206619310', + 'platform': 'Facebook', + 'platformId': '168628206619310', + 'verified': True + } + }, { + 'id': 70175930004, + 'platformId': '1347071985421819_2210227459106263', + 'platform': 'Facebook', + 'date': '2019-09-08 00:15:13', + 'updated': '2019-09-08 01:39:40', + 'type': 'link', + 'title': 'NBC News', + 'description': 'BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://nbcnews.to/34stfC2', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }, { + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/TiffanyLaneKSN/posts/2210227459106263', + 'subscriberCount': 1652, + 'score': -3.5, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 4, + 'shareCount': 0, + 'commentCount': 0, + 'loveCount': 0, + 'wowCount': 1, + 'hahaCount': 1, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 4, + 'shareCount': 3, + 'commentCount': 2, + 'loveCount': 2, + 'wowCount': 2, + 'hahaCount': 3, + 'sadCount': 3, + 'angryCount': 2, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 2619277, + 'name': 'Tiffany Lane KSN', + 'handle': 'TiffanyLaneKSN', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/23561789_1347072212088463_2246668714920855149_n.jpg?_nc_cat=107&_nc_oc=AQnZDnhp7-uirbeBJFPQ7QsI-fi8APj-_SttxiD62881HSe3FPdah5OxNb8BYC95JzM&_nc_ht=scontent.xx&oh=f7dc140f09610a3ccb91b5e1289961e8&oe=5E0EFD45', + 'subscriberCount': 1652, + 'url': 'https://www.facebook.com/1347071985421819', + 'platform': 'Facebook', + 'platformId': '1347071985421819', + 'verified': False + } + }, { + 'id': 70175692433, + 'platformId': '1859483880770155_2628920380493164', + 'platform': 'Facebook', + 'date': '2019-09-08 00:13:03', + 'updated': '2019-09-08 01:21:18', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David 
in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': "I can't quit laughing! The comments are great!!! https://www.facebook.com/155869377766434/posts/3572995539387117/", + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/155869377766434/posts/3572995539387117/', + 'expanded': 'https://www.facebook.com/155869377766434/posts/3572995539387117/' + }, { + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://nbcnews.to/34stfC2' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/groups/1859483880770155/permalink/2628920380493164', + 'subscriberCount': 10065, + 'score': 4.0, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 8, + 'shareCount': 34, + 'commentCount': 42, + 'loveCount': 0, + 'wowCount': 8, + 'hahaCount': 58, + 'sadCount': 0, + 'angryCount': 14, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 5, + 'shareCount': 5, + 'commentCount': 5, + 'loveCount': 5, + 'wowCount': 2, + 'hahaCount': 7, + 'sadCount': 2, + 'angryCount': 10, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6610541, + 'name': 'The “Original” Funny Trump Memes', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-0/c19.0.50.50a/p50x50/67884429_10157587566374138_1513236713479077888_n.jpg?_nc_cat=108&_nc_oc=AQl4RDR-0KEX3GfZtivxGGD5_XY0BPsp188WHgMd7pUHgFAZYsS19F8N1H8CV-xKqck&_nc_ht=scontent.xx&oh=a6c7de50f5e44eb097047c7b1c9f1937&oe=5E141DC6', + 'subscriberCount': 10066, + 'url': 'https://www.facebook.com/1859483880770155', + 'platform': 'Facebook', + 'platformId': '1859483880770155', + 'verified': False + } + }, { + 'id': 70175484410, + 'platformId': '371655802904606_2993321680737992', + 'platform': 'Facebook', + 'date': '2019-09-08 00:09:38', + 'updated': '2019-09-08 01:34:13', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': '"Trump says" is not how you lead a story with his track record of 10,000+ lies and counting. You need to fact check. Do you have an independent source confirming? That is how journalism USED to work. Otherwise I refer you to the Narcissist playbook: create a situation, say you solved it. This is nothing but an attempt to redirect the media, successful since NBC just posted it, away from his other gaffs this week. This is nothing but an attempt to "be presidential" and "show strength" when there is no confirmation that such a meeting was ever discussed, let alone planned. But he cancelled it, yeah, because he is in charge. Yup, nothing to see here. Would you also run a story saying he was going to meet with Santa Claus to talk about a visit to the White House in December, but had to cancel for an important trip to meet Kim in North Korea? 
(HINT: Santa isn\'t real) Narcissism-101, and the media is falling for it by LEADING with his nonsense without ANY 3rd party confirmation that the Afghan President even was aware of any of this, without any 3rd party confirmation that anyone in our government, not beholden to Trump, knew about this. Journalism is now about sensational headlines, and revenue generating clicks, and not about confirming their source before publishing. SMH', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0eanqLCSl20VXhRTl9NGouEiSmN6iIzjBnbNX4qr8nrtyjKxKz3dQPAKE' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/LiberalIdentity/posts/2993321680737992', + 'subscriberCount': 109406, + 'score': 4.122222222222222, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 72, + 'shareCount': 64, + 'commentCount': 58, + 'loveCount': 2, + 'wowCount': 13, + 'hahaCount': 102, + 'sadCount': 8, + 'angryCount': 52, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 15, + 'shareCount': 26, + 'commentCount': 8, + 'loveCount': 4, + 'wowCount': 4, + 'hahaCount': 6, + 'sadCount': 5, + 'angryCount': 22, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 1643177, + 'name': 'Liberal Identity', + 'handle': 'LiberalIdentity', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1966670_616816825055168_94398373_n.jpg?_nc_cat=1&_nc_oc=AQlkU5jU0IAMIEM90d7BwalNT6XZ1Ao-s20pwA0_LEf6H6R8aucZVDdN_1T6qM-rLUI&_nc_ht=scontent.xx&oh=5919477490e2a1c5b3bae1dc0f711c2f&oe=5E0CEE2D', + 'subscriberCount': 109454, + 'url': 'https://www.facebook.com/371655802904606', + 'platform': 'Facebook', + 'platformId': '371655802904606', + 'verified': False + } + }, { + 'id': 70175463296, + 'platformId': '379673542547579_690041071510823', + 'platform': 'Facebook', + 'date': '2019-09-08 00:00:44', + 'updated': '2019-09-08 01:34:00', + 'type': 'link', + 'title': 'NBC News', + 'description': 'BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://nbcnews.to/34stfC2', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }, { + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/CiaraEncinasKYMA/posts/690041071510823', + 'subscriberCount': 1809, + 'score': -3.8333333333333335, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 3, + 'shareCount': 0, + 'commentCount': 1, + 'loveCount': 0, + 'wowCount': 0, + 'hahaCount': 1, + 'sadCount': 0, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 4, + 'shareCount': 4, + 'commentCount': 2, + 'loveCount': 2, + 'wowCount': 3, + 'hahaCount': 2, + 'sadCount': 3, + 'angryCount': 3, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 3409486, + 'name': 'Ciara Encinas KYMA', + 'handle': 'CiaraEncinasKYMA', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/42898860_493391077842491_5671079035296284672_n.jpg?_nc_cat=110&_nc_oc=AQnUR3LFc1pjjn762jb-ZaxbLszVU-b9emCjFTv7YaZ6fhk3ukXZcjFTDQae40d56qs&_nc_ht=scontent.xx&oh=3ea3988506d4fb15828d853e4c50e154&oe=5DF480EC', + 'subscriberCount': 1809, + 'url': 'https://www.facebook.com/379673542547579', + 'platform': 'Facebook', + 'platformId': '379673542547579', + 'verified': False + } + }], + 'pagination': {} + } } diff --git a/test/test_crowdtangle/post.py b/test/test_crowdtangle/post.py index c3c7eba7cf..d39b71eb63 100644 --- a/test/test_crowdtangle/post.py +++ b/test/test_crowdtangle/post.py @@ -1,6681 +1,6161 @@ # flake8: noqa expected_posts = { - "status": 200, - "result": { - "posts": [ - { - "id": 70157825663, - "platformId": "6491828674_10157706657658675", - "platform": "Facebook", - "date": "2019-09-07 19:04:03", - "updated": "2019-09-08 00:41:10", - "type": "link", - "title": "NOAA assailed for defending Trump's Hurricane Dorian claim", - "caption": "pbs.org", - "description": "WASHINGTON — Former top officials of the National Oceanic and Atmospheric Administration are assailing the agency for undermining its weather forecasters as it defends President Donald Trump's statement from days ago that Hurricane Dorian threatened Alabama. 
They say NOAA's action risks the credib...", - "message": "Former top officials of the National Oceanic and Atmospheric Administration are assailing the agency for undermining its weather forecasters as it defends President Trump’s statement from days ago that Hurricane Dorian threatened Alabama.", - "expandedLinks": [ - { - "original": "https://www.pbs.org/newshour/politics/noaa-assailed-for-defending-trumps-hurricane-dorian-claim", - "expanded": "https://www.pbs.org/newshour/politics/noaa-assailed-for-defending-trumps-hurricane-dorian-claim", - } - ], - "link": "https://www.pbs.org/newshour/politics/noaa-assailed-for-defending-trumps-hurricane-dorian-claim", - "postUrl": "https://www.facebook.com/newshour/posts/10157706657658675", - "subscriberCount": 1417173, - "score": 36.10762331838565, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAQChtFBtJknmoi&w=720&h=720&url=https%3A%2F%2Fd3i6fh83elv35t.cloudfront.net%2Fstatic%2F2019%2F09%2FRTS2OSQS-1024x683.jpg&cfs=1&_nc_hash=AQCCwh6oo6LQnKD3", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAsEhaTWfH-MVd9&url=https%3A%2F%2Fd3i6fh83elv35t.cloudfront.net%2Fstatic%2F2019%2F09%2FRTS2OSQS-1024x683.jpg&_nc_hash=AQA7Rw511Jqi6fCQ", - } - ], - "statistics": { - "actual": { - "likeCount": 3056, - "shareCount": 1745, - "commentCount": 1021, - "loveCount": 63, - "wowCount": 242, - "hahaCount": 419, - "sadCount": 204, - "angryCount": 1302, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 71, - "shareCount": 44, - "commentCount": 36, - "loveCount": 5, - "wowCount": 11, - "hahaCount": 10, - "sadCount": 26, - "angryCount": 20, - "thankfulCount": 0, - }, - }, - "account": { - "id": 7777, - "name": "PBS NewsHour", - "handle": "newshour", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c2.0.200.200a/p200x200/303161_10150312469923675_881915800_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlncoeS4CvKUmO2uTUydTKWAioHD0iWx6bl9DqkBkwnCZgpb6CCkyZj7aidr38Ug1k&_nc_ht=scontent.xx&oh=0d6d1417f6b982eac877d479f2404a37&oe=5E0E2C5A", - "subscriberCount": 1417219, - "url": "https://www.facebook.com/6491828674", - "platform": "Facebook", - "platformId": "6491828674", - "verified": True, - }, - }, - { - "id": 70175022660, - "platformId": "155869377766434_3572995539387117", - "platform": "Facebook", - "date": "2019-09-07 23:57:09", - "updated": "2019-09-08 00:20:11", - "type": "link", - "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "message": "BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://nbcnews.to/34stfC2", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio", - }, - { - "original": "https://nbcnews.to/34stfC2", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio", - }, - ], - "link": "https://nbcnews.to/34stfC2", - "postUrl": "https://www.facebook.com/NBCNews/posts/3572995539387117", - "subscriberCount": 9970622, - "score": 35.17213114754098, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 321, - "shareCount": 732, - "commentCount": 1276, - "loveCount": 10, - "wowCount": 212, - "hahaCount": 1133, - "sadCount": 43, - "angryCount": 564, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 27, - "shareCount": 20, - "commentCount": 25, - "loveCount": 6, - "wowCount": 9, - "hahaCount": 12, - "sadCount": 12, - "angryCount": 11, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13889, - "name": "NBC News", - "handle": "NBCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653", - "subscriberCount": 9970540, - "url": "https://www.facebook.com/155869377766434", - "platform": "Facebook", - "platformId": "155869377766434", - "verified": True, - }, - }, - { - "id": 70166530441, - "platformId": "5281959998_10152010079459999", - "platform": "Facebook", - "date": "2019-09-07 22:01:31", - "updated": "2019-09-08 00:47:27", - "type": "link", - "title": "2019 U.S. Open Live Updates: Bianca Andreescu Defeats Serena Williams", - "caption": "nytimes.com", - "description": "Andreescu, a 19-year-old Canadian, won her first Grand Slam title, denying Williams her 24th.", - "message": "Breaking News: Bianca Andreescu, 19, has defeated Serena Williams to win the U.S. 
Open, capping a stunning rise to the top of women’s tennis.", - "expandedLinks": [ - { - "original": "https://www.nytimes.com/2019/09/07/sports/tennis/us-open-serena-williams-bianca-andreescu.html?smid=fb-nytimes&smtyp=cur", - "expanded": "https://www.nytimes.com/2019/09/07/sports/tennis/us-open-serena-williams-bianca-andreescu.html?smid=fb-nytimes&smtyp=cur", - } - ], - "link": "https://www.nytimes.com/2019/09/07/sports/tennis/us-open-serena-williams-bianca-andreescu.html?smid=fb-nytimes&smtyp=cur", - "postUrl": "https://www.facebook.com/nytimes/posts/10152010079459999", - "subscriberCount": 16854203, - "score": 35.01030927835052, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCq231_1hMcQsgV&w=550&h=550&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fsports%2F07open-women-live-serena2%2F07open-women-live-serena2-facebookJumbo.jpg&cfs=1&sx=340&sy=0&sw=550&sh=550&_nc_hash=AQBLh_V9dCVShHEK", - "height": 550, - "width": 550, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBU8mPAEayRbkd7&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fsports%2F07open-women-live-serena2%2F07open-women-live-serena2-facebookJumbo.jpg&_nc_hash=AQDiFMN6i1MXQLjS", - } - ], - "statistics": { - "actual": { - "likeCount": 6493, - "shareCount": 2008, - "commentCount": 702, - "loveCount": 1335, - "wowCount": 2493, - "hahaCount": 51, - "sadCount": 479, - "angryCount": 23, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 161, - "shareCount": 61, - "commentCount": 75, - "loveCount": 11, - "wowCount": 19, - "hahaCount": 23, - "sadCount": 17, - "angryCount": 21, - "thankfulCount": 0, - }, - }, - "account": { - "id": 7132, - "name": "The New York Times", - "handle": "nytimes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57", - "subscriberCount": 16854715, - "url": "https://www.facebook.com/5281959998", - "platform": "Facebook", - "platformId": "5281959998", - "verified": True, - }, - }, - { - "id": 70161391741, - "platformId": "13312631635_10157392232686636", - "platform": "Facebook", - "date": "2019-09-07 20:18:42", - "updated": "2019-09-08 00:42:43", - "type": "link", - "title": "Amber Rudd resigns as Boris Johnson's government plunged into further chaos", - "caption": "independent.co.uk", - "description": 'Amber Rudd has resigned and plans to run as an independent candidate in a future general election. The cabinet minister told The Sunday Times she was resigning because of Boris Johnson\'s "purge" of the party. 
More follows…', - "message": "BREAKING", - "expandedLinks": [ - { - "original": "https://www.independent.co.uk/news/uk/politics/amber-rudd-resign-boris-johnson-cabinet-conservatives-brexit-a9096146.html", - "expanded": "https://www.independent.co.uk/news/uk/politics/amber-rudd-resign-boris-johnson-cabinet-conservatives-brexit-a9096146.html", - } - ], - "link": "https://www.independent.co.uk/news/uk/politics/amber-rudd-resign-boris-johnson-cabinet-conservatives-brexit-a9096146.html", - "postUrl": "https://www.facebook.com/TheIndependentOnline/posts/10157392232686636", - "subscriberCount": 8832865, - "score": 33.56692913385827, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBCGewJje1f67Rg&w=720&h=720&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2018%2F09%2F26%2F17%2Fbreaking-4.png&cfs=1&_nc_hash=AQBfdoe_AAyA9BFz", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDVU76tPdyR4Lts&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2018%2F09%2F26%2F17%2Fbreaking-4.png&_nc_hash=AQAf9SJj09GBLcIW", - } - ], - "statistics": { - "actual": { - "likeCount": 3194, - "shareCount": 2442, - "commentCount": 682, - "loveCount": 365, - "wowCount": 283, - "hahaCount": 1538, - "sadCount": 10, - "angryCount": 12, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 87, - "shareCount": 43, - "commentCount": 55, - "loveCount": 9, - "wowCount": 9, - "hahaCount": 27, - "sadCount": 8, - "angryCount": 16, - "thankfulCount": 0, - }, - }, - "account": { - "id": 19065, - "name": "The Independent", - "handle": "TheIndependentOnline", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11051795_10152732082756636_6705742038347351188_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQmApCC_log9_TfPU5-TLVRKHyBo2YH6UPG2d6R-43r5u7HhElr7QPKk9J_AXR9q1Ac&_nc_ht=scontent.xx&oh=47ac79067cb2e33520f6920eb409611d&oe=5E0FED75", - "subscriberCount": 8834731, - "url": "https://www.facebook.com/13312631635", - "platform": "Facebook", - "platformId": "13312631635", - "verified": True, - }, - }, - { - "id": 70157398709, - "platformId": "6250307292_10159057208257293", - "platform": "Facebook", - "date": "2019-09-07 19:00:01", - "updated": "2019-09-08 00:18:11", - "type": "link", - "title": "NOAA’s support of President Trump over its own scientists provokes mass uproar in weather community", - "caption": "washingtonpost.com", - "description": "Weather forecasters inside and outside the government and former leaders of NOAA and the Weather Service have spoken against NOAA's decision.", - "message": "Weather forecasters inside and outside the government and former leaders of NOAA and the Weather Service have spoken against NOAA's decision.", - "expandedLinks": [ - { - "original": "https://www.washingtonpost.com/weather/2019/09/07/noaas-support-president-trump-over-its-own-scientists-provokes-mass-uproar-weather-community/?tid=sm_fb", - "expanded": "https://www.washingtonpost.com/weather/2019/09/07/noaas-support-president-trump-over-its-own-scientists-provokes-mass-uproar-weather-community/?tid=sm_fb", - } - ], - "link": "https://www.washingtonpost.com/weather/2019/09/07/noaas-support-president-trump-over-its-own-scientists-provokes-mass-uproar-weather-community/?tid=sm_fb", - "postUrl": "https://www.facebook.com/washingtonpost/posts/10159057208257293", - "subscriberCount": 6289171, - "score": 28.168560606060606, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQAwZem4zr3cX-NP&w=720&h=720&url=https%3A%2F%2Fwww.washingtonpost.com%2Fresizer%2FkQCso98jOGkmOTc9hfSlGydUsOM%3D%2F1484x0%2Farc-anglerfish-washpost-prod-washpost.s3.amazonaws.com%2Fpublic%2FD2RJSBBJSJHILN2XJ3O5GQIKBA.png&cfs=1&_nc_hash=AQA2NtYOouKVPrCp", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQB4ljiz_z3LoSEg&url=https%3A%2F%2Fwww.washingtonpost.com%2Fresizer%2FkQCso98jOGkmOTc9hfSlGydUsOM%3D%2F1484x0%2Farc-anglerfish-washpost-prod-washpost.s3.amazonaws.com%2Fpublic%2FD2RJSBBJSJHILN2XJ3O5GQIKBA.png&_nc_hash=AQDVp-1s7L_A_3Ld", - } - ], - "statistics": { - "actual": { - "likeCount": 5375, - "shareCount": 3264, - "commentCount": 1291, - "loveCount": 84, - "wowCount": 902, - "hahaCount": 261, - "sadCount": 331, - "angryCount": 3365, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 183, - "shareCount": 94, - "commentCount": 86, - "loveCount": 11, - "wowCount": 27, - "hahaCount": 37, - "sadCount": 43, - "angryCount": 47, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10337, - "name": "Washington Post", - "handle": "washingtonpost", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21430382_10156479428327293_4985425836902947855_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlVAdyvl5eHjwkppWx8pvifrl3XbqjhakYzwfQ1AHjPFaQPjFxNF4BbZq5BQ1nys4Y&_nc_ht=scontent.xx&oh=6cea07f8fc3edae1f7c743fc8997901c&oe=5DC8AB0A", - "subscriberCount": 6289503, - "url": "https://www.facebook.com/6250307292", - "platform": "Facebook", - "platformId": "6250307292", - "verified": True, - }, - }, - { - "id": 70157976062, - "platformId": "354522044588660_3473929489314551", - "platform": "Facebook", - "date": "2019-09-07 19:00:24", - "updated": "2019-09-08 00:39:40", - "type": "link", - "title": "A mother-and-daughter team have developed what may be the world's first Alzheimer’s vaccine.", - "caption": "upworthy.com", - "description": "Alzheimer's is a terrible disease that robs a person of their personality and memory before eventually leading to death. It's the sixth-largest killer in the U.S. 
and, currently, there are 5.8…", - "message": "This could be huge.", - "expandedLinks": [ - { - "original": "https://buff.ly/2MXnMOa", - "expanded": "https://www.upworthy.com/a-mother-and-daughter-team-have-developed-what-may-be-the-worlds-first-alzheimers-vaccine", - } - ], - "link": "https://buff.ly/2MXnMOa", - "postUrl": "https://www.facebook.com/Upworthy/posts/3473929489314551", - "subscriberCount": 11752205, - "score": 27.886194029850746, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d71a49b713b6d095d69ee08%2F92bcb6d0ada52001f20e776814129c522da2386c_883a50e22f1efb9063f4cb3154f04b2c7cd83e52_facebook&cfs=1&_nc_hash=AQDe8CpgkSIGZoni", - "height": 600, - "width": 600, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCoYDTqFI4MiK1i&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d71a49b713b6d095d69ee08%2F92bcb6d0ada52001f20e776814129c522da2386c_883a50e22f1efb9063f4cb3154f04b2c7cd83e52_facebook&_nc_hash=AQBfZm7y0NWkSc6X", - } - ], - "statistics": { - "actual": { - "likeCount": 6491, - "shareCount": 4238, - "commentCount": 319, - "loveCount": 1719, - "wowCount": 2161, - "hahaCount": 14, - "sadCount": 4, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 281, - "shareCount": 111, - "commentCount": 48, - "loveCount": 51, - "wowCount": 14, - "hahaCount": 15, - "sadCount": 9, - "angryCount": 7, - "thankfulCount": 0, - }, - }, - "account": { - "id": 3919, - "name": "Upworthy", - "handle": "Upworthy", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1914363_1176320005742189_4709951186905632219_n.png?_nc_cat=1&_nc_oc=AQlPiX5mYxZC_Xj8_M4a7JZZvCD27izvAXTMtobXrLjwA4S5Pel-CsMh5GMouHt8LNg&_nc_ht=scontent.xx&oh=ba4e0db7c2521356dc17108d8aa4a12a&oe=5E04D944", - "subscriberCount": 11752205, - "url": "https://www.facebook.com/354522044588660", - "platform": "Facebook", - "platformId": "354522044588660", - "verified": True, - }, - }, - { - "id": 70165824138, - "platformId": "140738092630206_2613271415376849", - "platform": "Facebook", - "date": "2019-09-07 21:50:01", - "updated": "2019-09-08 00:19:09", - "type": "link", - "title": "Miley Cyrus vows to not have children till climate change is solved and fish are 'in the water'", - "caption": "theblaze.com", - "description": "She wants any children to be able to 'live on an earth with fish in the water'", - "message": 'She wants any children to be able to "live on an earth with fish in the water."', - "expandedLinks": [ - { - "original": "https://bit.ly/2Lxlp1f", - "expanded": "https://www.theblaze.com/news/miley-cyrus-vows-to-not-have-children-till-climate-change-is-solved-and-fish-are-in-the-water?utm_content=buffer95e36&utm_medium=organic&utm_source=facebook&utm_campaign=fb-theblaze", - } - ], - "link": "https://bit.ly/2Lxlp1f", - "postUrl": "https://www.facebook.com/TheBlaze/posts/2613271415376849", - "subscriberCount": 2089159, - "score": 17.958217270194986, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBP8jNPeMx4HEtr&w=720&h=720&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F19878079%252F1200x600.jpg%2F2000%252C2000%2FNZZZMKyAwJ5%252BbJLi%2Fimg.jpg&cfs=1&sx=217&sy=0&sw=1000&sh=1000&_nc_hash=AQAT_MAUTaHqDTM2", - "height": 720, - "width": 720, - "full": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQAdLFFRmrWHlqEa&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F19878079%252F1200x600.jpg%2F2000%252C2000%2FNZZZMKyAwJ5%252BbJLi%2Fimg.jpg&_nc_hash=AQCY9HQEogoRJVy4", - } - ], - "statistics": { - "actual": { - "likeCount": 747, - "shareCount": 679, - "commentCount": 2277, - "loveCount": 158, - "wowCount": 63, - "hahaCount": 2476, - "sadCount": 17, - "angryCount": 30, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 65, - "shareCount": 65, - "commentCount": 92, - "loveCount": 4, - "wowCount": 17, - "hahaCount": 43, - "sadCount": 8, - "angryCount": 65, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6892, - "name": "TheBlaze", - "handle": "TheBlaze", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/47350623_2141870595850269_7864140219111440384_n.png?_nc_cat=1&_nc_oc=AQmGyVQswjmmaInAkgMKbLJ62jAcb2BShbL78435-MqCEBLedhKr7VO97Nzxt2x220k&_nc_ht=scontent.xx&oh=4a5ce0b44b6400aab9bb78aa2afdee87&oe=5E011864", - "subscriberCount": 2089166, - "url": "https://www.facebook.com/140738092630206", - "platform": "Facebook", - "platformId": "140738092630206", - "verified": True, - }, - }, - { - "id": 70158921605, - "platformId": "40656699159_10156974744419160", - "platform": "Facebook", - "date": "2019-09-07 19:30:02", - "updated": "2019-09-08 00:30:26", - "type": "link", - "title": "Beto: Americans Will Willingly Surrender Guns — It'll Be The Law", - "caption": "washingtonexaminer.com", - "description": "MANCHESTER, New Hampshire — Former Texas Democratic Rep. Beto O’Rourke, a White House presidential contender, confirmed Saturday at the New Hampshire Democratic Party Convention that his mandatory firearm buyback plan would not include law enforcement door knocks.", - "message": "Would you comply with a mandatory buyback program?", - "expandedLinks": [ - { - "original": "https://washex.am/2N0eq4j", - "expanded": "https://www.washingtonexaminer.com/news/beto-orourke-i-dont-see-the-policemen-going-door-to-door-with-my-mandatory-gun-buyback-plan", - } - ], - "link": "https://washex.am/2N0eq4j", - "postUrl": "https://www.facebook.com/WashingtonExaminer/posts/10156974744419160", - "subscriberCount": 714637, - "score": 17.656521739130437, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAKuUJV91AX25Yb&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5c42324500c63f68be6da5c2%2F5d73fa34e6dd180f722ad479%2Fb963a3b05ef838a45f688712f3d863f2.original.jpg&cfs=1&_nc_hash=AQBZqcfcgz2rRjAX", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDyJATdiB8d3Y9i&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5c42324500c63f68be6da5c2%2F5d73fa34e6dd180f722ad479%2Fb963a3b05ef838a45f688712f3d863f2.original.jpg&_nc_hash=AQALGOQFLGNI7dm-", - } - ], - "statistics": { - "actual": { - "likeCount": 122, - "shareCount": 758, - "commentCount": 1595, - "loveCount": 5, - "wowCount": 15, - "hahaCount": 1024, - "sadCount": 7, - "angryCount": 535, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 65, - "shareCount": 40, - "commentCount": 62, - "loveCount": 7, - "wowCount": 7, - "hahaCount": 19, - "sadCount": 3, - "angryCount": 27, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13991, - "name": "Washington Examiner", - "handle": "WashingtonExaminer", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/36928610_10156017618514160_6905131952433528832_n.jpg?_nc_cat=111&_nc_oc=AQnKuEJBvxlMgc-zQHzSfEtsgFfHehn1pucacRbqrYlmmQp69EGwogOuyEUo-OV8OWM&_nc_ht=scontent.xx&oh=88b1063a5362110cc87fb9d6caedea35&oe=5DFE6885", - "subscriberCount": 714626, - "url": "https://www.facebook.com/40656699159", - "platform": "Facebook", - "platformId": "40656699159", - "verified": True, - }, - }, - { - "id": 70175854512, - "platformId": "182919686769_10156515300916770", - "platform": "Facebook", - "date": "2019-09-08 00:00:12", - "updated": "2019-09-08 00:40:44", - "type": "link", - "title": "Chicago Mayor Blames Republicans For City’s Violence", - "caption": "dailycaller.com", - "description": "'Keep our name out of your mouth'", - "message": "Craziness in Chicago...", - "expandedLinks": [ - { - "original": "https://dailycaller.com/2019/09/03/chicago-mayor-blames-republicans-gun-violence/", - "expanded": "https://dailycaller.com/2019/09/03/chicago-mayor-blames-republicans-gun-violence/", - } - ], - "link": "https://dailycaller.com/2019/09/03/chicago-mayor-blames-republicans-gun-violence/", - "postUrl": "https://www.facebook.com/DailyCaller/posts/10156515300916770", - "subscriberCount": 5408115, - "score": 17.35958904109589, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQA54EEroC3nEHay&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f06e2545380e7440c885%2F33d0f3248d5c22cd6f7692bf469ed35a5ab97f26_fb9b8998b7e16da2c675a1ab25b4b260d8c0dcc2_facebook&cfs=1&_nc_hash=AQDnUYQYto-6jboY", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDc5HzVX1lJnosX&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f06e2545380e7440c885%2F33d0f3248d5c22cd6f7692bf469ed35a5ab97f26_fb9b8998b7e16da2c675a1ab25b4b260d8c0dcc2_facebook&_nc_hash=AQAQYgQAvvwFmxWw", - } - ], - "statistics": { - "actual": { - "likeCount": 232, - "shareCount": 776, - "commentCount": 1586, - "loveCount": 4, - "wowCount": 88, - "hahaCount": 1653, - "sadCount": 41, - "angryCount": 689, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 83, - "shareCount": 39, - "commentCount": 79, - "loveCount": 7, - "wowCount": 11, - "hahaCount": 40, - "sadCount": 5, - "angryCount": 28, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13489, - "name": "The Daily Caller", - "handle": "DailyCaller", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471", - "subscriberCount": 5408115, - "url": "https://www.facebook.com/182919686769", - "platform": "Facebook", - "platformId": "182919686769", - "verified": True, - }, - }, - { - "id": 70158922017, - "platformId": "268914272540_10156464022182541", - "platform": "Facebook", - "date": "2019-09-07 19:20:05", - "updated": "2019-09-08 00:30:46", - "type": "link", - "title": "Michigan trophy hunter who paid $400,000 to hunt rare black rhino allowed to import its horns, skin and skull to America", - "caption": "nydailynews.com", - "description": " ", - "message": "A Michigan man who shelled out $400K to hunt and kill a rare black rhinoceros in Africa last year will be allowed to import its skin, skull and horns to America.", - "expandedLinks": [ - { - "original": "https://trib.al/MQPSeDX", - "expanded": 
"https://www.nydailynews.com/news/national/ny-trophy-hunter-rare-black-rhino-permit-horns-skin-skull-20190907-yq7z3q3dlvhrxf2qam2iuydz2m-story.html?fbclid=IwAR1clOge0lIFgq-sBiUrOisjtvPs7gnFJFu2ci6Sh_cHpfUiQ1pU_wSjuFk", - } - ], - "link": "https://trib.al/MQPSeDX", - "postUrl": "https://www.facebook.com/NYDailyNews/posts/10156464022182541", - "subscriberCount": 3119682, - "score": 14.468468468468469, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBl8MfHRPDkK0dp&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F7mCUiJ_3DITEHCIaQCpKbrw0Rxo%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FL4OKZAE3QBANLGI7XZ25UQXW2Q.jpg&cfs=1&_nc_hash=AQAWKenM6Rp9QV0U", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBkqItNryNEh7Am&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F7mCUiJ_3DITEHCIaQCpKbrw0Rxo%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FL4OKZAE3QBANLGI7XZ25UQXW2Q.jpg&_nc_hash=AQDuG8_P3XVgFrCd", - } - ], - "statistics": { - "actual": { - "likeCount": 82, - "shareCount": 326, - "commentCount": 212, - "loveCount": 8, - "wowCount": 24, - "hahaCount": 2, - "sadCount": 68, - "angryCount": 884, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 28, - "shareCount": 26, - "commentCount": 17, - "loveCount": 4, - "wowCount": 9, - "hahaCount": 9, - "sadCount": 8, - "angryCount": 10, - "thankfulCount": 0, - }, - }, - "account": { - "id": 18752, - "name": "New York Daily News", - "handle": "NYDailyNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1", - "subscriberCount": 3120017, - "url": "https://www.facebook.com/268914272540", - "platform": "Facebook", - "platformId": "268914272540", - "verified": True, - }, - }, - { - "id": 70172878599, - "platformId": "210277954204_10156789911629205", - "platform": "Facebook", - "date": "2019-09-07 23:41:12", - "updated": "2019-09-08 00:39:40", - "type": "youtube", - "caption": "youtube.com", - "description": "San Francisco took a symbolic vote to declare the NRA a terrorist organization. Cenk Uygur and Ana Kasparian, hosts of The Young Turks, break it down. 
MORE T...", - "expandedLinks": [ - { - "original": "https://www.youtube.com/watch?v=-M53X2IrQes&feature=youtu.be", - "expanded": "https://www.youtube.com/watch?v=-M53X2IrQes&feature=youtu.be", - } - ], - "link": "https://www.youtube.com/watch?v=-M53X2IrQes&feature=youtu.be", - "postUrl": "https://www.facebook.com/TheYoungTurks/posts/10156789911629205", - "subscriberCount": 2099948, - "score": 13.402985074626866, - "media": [ - { - "type": "video", - "url": "https://www.youtube.com/embed/-M53X2IrQes?autoplay=1", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQD0ftgbO2TXPuL-&w=720&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2F-M53X2IrQes%2Fmaxresdefault.jpg&cfs=1&_nc_hash=AQAJe0Z3lWqtjAjc", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBt2WjZ1yQI3VUu&w=1280&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2F-M53X2IrQes%2Fmaxresdefault.jpg&crop&_nc_hash=AQDnZ7LCi31UOQos", - }, - ], - "statistics": { - "actual": { - "likeCount": 526, - "shareCount": 92, - "commentCount": 79, - "loveCount": 140, - "wowCount": 13, - "hahaCount": 43, - "sadCount": 0, - "angryCount": 5, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 19, - "shareCount": 10, - "commentCount": 11, - "loveCount": 5, - "wowCount": 2, - "hahaCount": 11, - "sadCount": 3, - "angryCount": 6, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6786, - "name": "The Young Turks", - "handle": "TheYoungTurks", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5", - "subscriberCount": 2100186, - "url": "https://www.facebook.com/210277954204", - "platform": "Facebook", - "platformId": "210277954204", - "verified": True, - }, - }, - { - "id": 70175943871, - "platformId": "5550296508_10159878537516509", - "platform": "Facebook", - "date": "2019-09-08 00:33:39", - "updated": "2019-09-08 00:41:59", - "type": "link", - "title": "Alex Trebek is done with chemotherapy and back at work on 'Jeopardy!'", - "caption": "cnn.com", - "description": " ", - "message": "This beloved host of a long-running trivia show is back at work", - "expandedLinks": [ - { - "original": "https://cnn.it/34xMv0O", - "expanded": "https://www.cnn.com/2019/08/29/media/alex-trebek-chemo-jeopardy/index.html?utm_source=fbCNN&utm_content=2019-09-08T00%3A33%3A35&utm_term=link&utm_medium=social", - } - ], - "link": "https://cnn.it/34xMv0O", - "postUrl": "https://www.facebook.com/cnn/posts/10159878537516509", - "subscriberCount": 31389797, - "score": 13.347058823529412, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCd0-Oc1w-PqqN0&w=619&h=619&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190308154523-alex-trebek-super-tease.jpg&cfs=1&sx=8&sy=0&sw=619&sh=619&_nc_hash=AQAQt7q9Fv_Md2Ab", - "height": 619, - "width": 619, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBLf0lDqhzVJPlY&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190308154523-alex-trebek-super-tease.jpg&_nc_hash=AQAKZjUbHBnq8Pyp", - } - ], - "statistics": { - "actual": { - "likeCount": 1439, - "shareCount": 196, - "commentCount": 72, - "loveCount": 543, - "wowCount": 18, - "hahaCount": 0, - "sadCount": 0, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 60, - "shareCount": 26, - "commentCount": 33, 
- "loveCount": 7, - "wowCount": 14, - "hahaCount": 8, - "sadCount": 14, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 8323, - "name": "CNN", - "handle": "cnn", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12289622_10154246192721509_1897912583584847639_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnmWKpivHkplQlHvH6RU7ER1noSOq6saypKUuDbSnV0FWNYEYghmJGPxBpmhJO8UsU&_nc_ht=scontent.xx&oh=12e2b35de35132a27c2772d3fe565936&oe=5DF3AC02", - "subscriberCount": 31389797, - "url": "https://www.facebook.com/5550296508", - "platform": "Facebook", - "platformId": "5550296508", - "verified": True, - }, - }, - { - "id": 70159872429, - "platformId": "341163402640457_2815563945200378", - "platform": "Facebook", - "date": "2019-09-07 19:45:00", - "updated": "2019-09-08 00:41:28", - "type": "native_video", - "description": "Police repeatedly punched this Black teen for ‘resisting’ — but new bodycam footage shows otherwise", - "message": "This teen is suing for negligence and emotional distress after police repeatedly punched him", - "expandedLinks": [ - { - "original": "https://www.facebook.com/NowThisPolitics/videos/513652776066740/", - "expanded": "https://www.facebook.com/NowThisPolitics/videos/513652776066740/", - } - ], - "link": "https://www.facebook.com/NowThisPolitics/videos/513652776066740/", - "postUrl": "https://www.facebook.com/NowThisNews/posts/2815563945200378", - "subscriberCount": 14557547, - "score": 12.941538461538462, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/70744112_493008498145296_4014857848306532352_n.mp4?_nc_cat=106&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQnIjFMc409QS5XESkpl6rChSVLmMz0ebAUqlNTlw5C-uk-7vc_noBr8hKrDpzFaj5A&_nc_ht=video.xx&oh=37fcc40c0b8fe5fe3400541ec2a6577e&oe=5D75970B", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67896250_513654266066591_881517186823225344_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnMPVnf-2Fgr7r1vVtMtspa6BCIOBZRHodyW1NsVAyGNpoYMGSfP_4aKH_6qHkZ6-c&_nc_ht=scontent.xx&oh=f48f82b31a66fcef783579b4673d16df&oe=5E023A0D", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/67896250_513654266066591_881517186823225344_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnMPVnf-2Fgr7r1vVtMtspa6BCIOBZRHodyW1NsVAyGNpoYMGSfP_4aKH_6qHkZ6-c&_nc_ht=scontent.xx&oh=5aebdedebaed77988b7fa660957310ec&oe=5DF6E6A6", - }, - ], - "statistics": { - "actual": { - "likeCount": 838, - "shareCount": 2297, - "commentCount": 1081, - "loveCount": 10, - "wowCount": 245, - "hahaCount": 36, - "sadCount": 567, - "angryCount": 3338, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 279, - "shareCount": 175, - "commentCount": 86, - "loveCount": 30, - "wowCount": 23, - "hahaCount": 16, - "sadCount": 27, - "angryCount": 14, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10247, - "name": "NowThis", - "handle": "NowThisNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28379313_1840609126029203_6405012222846484702_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkFdmIYy2uPLXX0xb7b7uQjQ-yiayvSBaPWqSlby_pCoW_1_Iybmu7xSmUb-UMr1gc&_nc_ht=scontent.xx&oh=add01854d7218f79e9aad6351846e535&oe=5E0CA890", - "subscriberCount": 14558656, - "url": "https://www.facebook.com/341163402640457", - "platform": "Facebook", - "platformId": "341163402640457", - "verified": True, - }, - "videoLengthMS": 136297, - }, - { - "id": 70158620099, - "platformId": "273864989376427_2989862727776626", - "platform": "Facebook", 
- "date": "2019-09-07 19:19:07", - "updated": "2019-09-08 00:44:08", - "type": "link", - "title": "Rep. Cleaver: Trump has turned presidency into ‘ATM machine’", - "caption": "msnbc.com", - "description": "POLITICO reports that an Air National Guard crew stopped overnight at President Trump’s resort in Scotland – and this layover is now being investigated by the House Oversight Committee. Rep. Emanuel Cleaver (D-MO) joins Alex Witt discuss this investigation into whether U.S. military spending has...", - "message": 'Rep. Cleaver says if report that military spending benefited Trump resort is true, "then Donald Trump has achieved something that I don’t think any president in our history has achieved or even attempted to achieve, and that is to corrupt the military."', - "expandedLinks": [ - { - "original": "https://on.msnbc.com/34vvmoJ", - "expanded": "https://www.msnbc.com/weekends-with-alex-witt/watch/rep-cleaver-trump-has-turned-presidency-into-atm-machine-68436037881?cid=sm_npd_ms_fb_ma", - } - ], - "link": "https://on.msnbc.com/34vvmoJ", - "postUrl": "https://www.facebook.com/msnbc/posts/2989862727776626", - "subscriberCount": 2290452, - "score": 12.633689839572192, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAQ_4ojLTZ1pFrN&w=630&h=630&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_witt_EmanuelCleaver_TrumpScotland_190907_1920x1080.nbcnews-fp-1200-630.jpg&cfs=1&sx=293&sy=0&sw=630&sh=630&_nc_hash=AQDt4JYiv6yr-ARF", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAF68YIIFGWkOMU&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_witt_EmanuelCleaver_TrumpScotland_190907_1920x1080.nbcnews-fp-1200-630.jpg&_nc_hash=AQAEszyHppWr9rkP", - } - ], - "statistics": { - "actual": { - "likeCount": 1339, - "shareCount": 1498, - "commentCount": 425, - "loveCount": 18, - "wowCount": 75, - "hahaCount": 62, - "sadCount": 405, - "angryCount": 903, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 88, - "shareCount": 57, - "commentCount": 95, - "loveCount": 8, - "wowCount": 15, - "hahaCount": 31, - "sadCount": 18, - "angryCount": 62, - "thankfulCount": 0, - }, - }, - "account": { - "id": 8324, - "name": "MSNBC", - "handle": "msnbc", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2", - "subscriberCount": 2290512, - "url": "https://www.facebook.com/273864989376427", - "platform": "Facebook", - "platformId": "273864989376427", - "verified": True, - }, - }, - { - "id": 70168994896, - "platformId": "114050161948682_2662947170392289", - "platform": "Facebook", - "date": "2019-09-07 22:35:54", - "updated": "2019-09-08 00:45:04", - "type": "link", - "title": "Albertsons Companies joins supermarkets in changing guns policy", - "caption": "reuters.com", - "description": " ", - "message": "Supermarket operator Albertsons Companies said on Saturday it would ask customers not to openly carry firearms at its stores, joining an array of retailers and store chains this week who changed their gun policy in light of several mass shootings in the United States.", - "expandedLinks": [ - { - "original": 
"https://www.reuters.com/article/us-albertsons-guncontrol-idUSKCN1VS0QF?utm_campaign=trueAnthem%3A+Trending+Content&utm_content=5d7430b5145a57000153fe31&utm_medium=trueAnthem&utm_source=facebook", - "expanded": "https://www.reuters.com/article/us-albertsons-guncontrol-idUSKCN1VS0QF?utm_campaign=trueAnthem%3A+Trending+Content&utm_content=5d7430b5145a57000153fe31&utm_medium=trueAnthem&utm_source=facebook", - } - ], - "link": "https://www.reuters.com/article/us-albertsons-guncontrol-idUSKCN1VS0QF?utm_campaign=trueAnthem%3A+Trending+Content&utm_content=5d7430b5145a57000153fe31&utm_medium=trueAnthem&utm_source=facebook", - "postUrl": "https://www.facebook.com/Reuters/posts/2662947170392289", - "subscriberCount": 4154272, - "score": 12.384615384615385, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQB-pWZOAddTH2Dh&w=720&h=720&url=https%3A%2F%2Fs2.reutersmedia.net%2Fresources%2Fr%2F%3Fm%3D02%26d%3D20190907%26t%3D2%26i%3D1427373205%26w%3D1200%26r%3DLYNXNPEF8610E&cfs=1&_nc_hash=AQCPVsKpXiRqvauR", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAwf7DJd7fL-VnI&url=https%3A%2F%2Fs2.reutersmedia.net%2Fresources%2Fr%2F%3Fm%3D02%26d%3D20190907%26t%3D2%26i%3D1427373205%26w%3D1200%26r%3DLYNXNPEF8610E&_nc_hash=AQDhYVtqc1GGWjgO", - } - ], - "statistics": { - "actual": { - "likeCount": 512, - "shareCount": 74, - "commentCount": 87, - "loveCount": 109, - "wowCount": 4, - "hahaCount": 11, - "sadCount": 0, - "angryCount": 8, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 26, - "shareCount": 8, - "commentCount": 9, - "loveCount": 4, - "wowCount": 3, - "hahaCount": 6, - "sadCount": 5, - "angryCount": 4, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10323, - "name": "Reuters", - "handle": "Reuters", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/51325614_2292147310805612_3874403780548100096_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlLN3v5RKOKT6LVQj--bvulAczkWupv1AuwaG14c3MkOAyF9oLoLGad6n1Rl6FhN6k&_nc_ht=scontent.xx&oh=73deaf953fbb14e82a9c92b2f850db23&oe=5E0ACADC", - "subscriberCount": 4154522, - "url": "https://www.facebook.com/114050161948682", - "platform": "Facebook", - "platformId": "114050161948682", - "verified": True, - }, - }, - { - "id": 70157942853, - "platformId": "532854420074062_3747993908560081", - "platform": "Facebook", - "date": "2019-09-07 19:03:00", - "updated": "2019-09-08 00:43:19", - "type": "link", - "title": "High Schooler Penalized 15 Yards for Praying in the End Zone", - "caption": "fanbuzz.com", - "description": "The list of things you are allowed to do after scoring a touchdown has gotten very short, particularly in high school. 
Elaborate celebrations have never been allowed at the high school or college l…", - "message": "WOW 😡", - "expandedLinks": [ - { - "original": "https://fanbuzz.com/national/high-schooler-penalized-15-yards-for-praying-in-the-endzone/?utm_source=facebook&utm_medium=agora&utm_term=faves&utm_campaign=faves", - "expanded": "https://fanbuzz.com/national/high-schooler-penalized-15-yards-for-praying-in-the-endzone/?utm_source=facebook&utm_medium=agora&utm_term=faves&utm_campaign=faves", - } - ], - "link": "https://fanbuzz.com/national/high-schooler-penalized-15-yards-for-praying-in-the-endzone/?utm_source=facebook&utm_medium=agora&utm_term=faves&utm_campaign=faves", - "postUrl": "https://www.facebook.com/thefavesusa/posts/3747993908560081", - "subscriberCount": 6323442, - "score": 12.014760147601477, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQD-IQrKwhdvG3JM&w=720&h=720&url=http%3A%2F%2Ffansrule.files.wordpress.com%2F2014%2F11%2Fknee.png%3Fw%3D1200%26h%3D627%26crop%3D1&cfs=1&_nc_hash=AQDrhZ6u7MuQyOka", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBBj0aMdnWPQ4KN&url=http%3A%2F%2Ffansrule.files.wordpress.com%2F2014%2F11%2Fknee.png%3Fw%3D1200%26h%3D627%26crop%3D1&_nc_hash=AQBbKfxbS6qdnJSt", - } - ], - "statistics": { - "actual": { - "likeCount": 188, - "shareCount": 885, - "commentCount": 580, - "loveCount": 5, - "wowCount": 79, - "hahaCount": 9, - "sadCount": 257, - "angryCount": 1253, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 64, - "shareCount": 87, - "commentCount": 52, - "loveCount": 7, - "wowCount": 15, - "hahaCount": 11, - "sadCount": 16, - "angryCount": 19, - "thankfulCount": 0, - }, - }, - "account": { - "id": 48728, - "name": "Faves USA", - "handle": "thefavesusa", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A", - "subscriberCount": 6323373, - "url": "https://www.facebook.com/532854420074062", - "platform": "Facebook", - "platformId": "532854420074062", - "verified": True, - }, - }, - { - "id": 70166547836, - "platformId": "908009612563863_2986545271376943", - "platform": "Facebook", - "date": "2019-09-07 22:00:27", - "updated": "2019-09-08 00:48:09", - "type": "native_video", - "message": "‘You ask a lot of stupid questions.’ — Pres. 
Trump has made a pastime out of attacking women journalists of color", - "expandedLinks": [ - { - "original": "https://www.facebook.com/NowThisPolitics/videos/818841295179184/", - "expanded": "https://www.facebook.com/NowThisPolitics/videos/818841295179184/", - } - ], - "link": "https://www.facebook.com/NowThisPolitics/videos/818841295179184/", - "postUrl": "https://www.facebook.com/NowThisPolitics/posts/2986545271376943", - "subscriberCount": 6074083, - "score": 11.583710407239819, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/10000000_651586571919863_4335461527566942208_n.mp4?_nc_cat=111&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQmNgZ66SHVdQFXtzxrzGHoWaVIlMDoLBwmnY9N9W4xHk1wkDz96S-h-1nZoDLC-MbM&_nc_ht=video.xx&oh=033f5207ca6ec2f6941f99b21660a396&oe=5D759CBE", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67128103_793076731088974_6383828119333109760_n.jpg?_nc_cat=103&_nc_log=1&_nc_oc=AQl0V9WmrSLlnMwSjWBceYUVZflzfeIB4hjLR_IsJS-oRteBnkFurlOVv2cB2Cug7ak&_nc_ht=scontent.xx&oh=788fcd3a1948fe3b524810415b0f86a2&oe=5DC90F1E", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/67128103_793076731088974_6383828119333109760_n.jpg?_nc_cat=103&_nc_log=1&_nc_oc=AQl0V9WmrSLlnMwSjWBceYUVZflzfeIB4hjLR_IsJS-oRteBnkFurlOVv2cB2Cug7ak&_nc_ht=scontent.xx&oh=9dee08bdf149fa35842ba7ac4113fccc&oe=5E0ACC45", - }, - ], - "statistics": { - "actual": { - "likeCount": 679, - "shareCount": 1545, - "commentCount": 1931, - "loveCount": 27, - "wowCount": 132, - "hahaCount": 254, - "sadCount": 169, - "angryCount": 2943, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 239, - "shareCount": 196, - "commentCount": 103, - "loveCount": 27, - "wowCount": 19, - "hahaCount": 24, - "sadCount": 28, - "angryCount": 27, - "thankfulCount": 0, - }, - }, - "account": { - "id": 311636, - "name": "NowThis Politics", - "handle": "NowThisPolitics", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28276603_1939096412788506_2850422809072819205_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlBSULvu9xr5smvB3kmRub5MfL3SpyPxNX94GEyc5skmb19swOR40nthDv1Kip3kcw&_nc_ht=scontent.xx&oh=b734d3faa39291c805198e3ad7de3450&oe=5DFF0890", - "subscriberCount": 6074746, - "url": "https://www.facebook.com/908009612563863", - "platform": "Facebook", - "platformId": "908009612563863", - "verified": True, - }, - "videoLengthMS": 216467, - }, - { - "id": 70167955465, - "platformId": "20446254070_10156890634939071", - "platform": "Facebook", - "date": "2019-09-07 22:20:02", - "updated": "2019-09-08 00:42:26", - "type": "link", - "title": "19-year-old Canadian Bianca Andreescu defeats Serena Williams in straight sets in the US Open final", - "caption": "businessinsider.com", - "description": "Andreescu's victory Saturday prevented Williams from claiming what would have been her record-tying 24th major singles championship.", - "message": "Bianca Andreescu built a big lead and then held on to upset Serena Williams 6-3, 7-5.", - "expandedLinks": [ - { - "original": "https://bit.ly/2UCsgL8", - "expanded": "https://www.businessinsider.com/bianca-andreescu-defeats-serena-williams-us-open-final-2019-9?utm_content=buffer39cb2&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer-bi", - } - ], - "link": "https://bit.ly/2UCsgL8", - "postUrl": "https://www.facebook.com/businessinsider/posts/10156890634939071", - "subscriberCount": 9107012, - "score": 11.270833333333334, - "media": [ - { - "type": 
"photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBGfkqTVz4UilE_&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d742c27e06cfa1a6c0dd357%2F090aaece09f13ab3d249fb01a6b03fe1d0a77696_a0619c160e7449d5962096e9723c12db15abc024_facebook&cfs=1&_nc_hash=AQDohz4x58mNUoe7", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDRg-pPDUTgXpv6&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d742c27e06cfa1a6c0dd357%2F090aaece09f13ab3d249fb01a6b03fe1d0a77696_a0619c160e7449d5962096e9723c12db15abc024_facebook&_nc_hash=AQCdrW99_j9Evkv4", - } - ], - "statistics": { - "actual": { - "likeCount": 360, - "shareCount": 48, - "commentCount": 34, - "loveCount": 48, - "wowCount": 41, - "hahaCount": 3, - "sadCount": 6, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 12, - "shareCount": 6, - "commentCount": 6, - "loveCount": 2, - "wowCount": 3, - "hahaCount": 7, - "sadCount": 5, - "angryCount": 7, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6648, - "name": "Business Insider", - "handle": "businessinsider", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9", - "subscriberCount": 9107575, - "url": "https://www.facebook.com/20446254070", - "platform": "Facebook", - "platformId": "20446254070", - "verified": True, - }, - }, - { - "id": 70163422198, - "platformId": "532854420074062_3748273735198765", - "platform": "Facebook", - "date": "2019-09-07 21:02:32", - "updated": "2019-09-08 00:43:19", - "type": "native_video", - "description": "Just chilling out... 
😂😍", - "expandedLinks": [ - { - "original": "https://www.facebook.com/VT/videos/327087418106099/", - "expanded": "https://www.facebook.com/VT/videos/327087418106099/", - } - ], - "link": "https://www.facebook.com/VT/videos/327087418106099/", - "postUrl": "https://www.facebook.com/thefavesusa/posts/3748273735198765", - "subscriberCount": 6323442, - "score": 11.15549597855228, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/46221677_357208778367365_7578121201361354752_n.mp4?_nc_cat=108&efg=eyJybHIiOjMxMywicmxhIjo1MTIsInZlbmNvZGVfdGFnIjoic3ZlX3NkIn0%3D&_nc_oc=AQnOjrn8FDX2fxGFfKnGJZqWUFAWlA_cvTOIuOBt9OOmY-usgi8QZRiJx2scwnwUMdU&rl=313&vabr=174&_nc_ht=video.xx&oh=6f26a3ba7feed19e5a13b63b659afa4e&oe=5D75B702", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/s720x720/45627901_327088121439362_6904272001196621824_n.jpg?_nc_cat=1&_nc_oc=AQn6h63uzCboFTqDkVSVhwjW_rs_dZ6IZACYblps27vlp0Upu5U471nUelf5p_T-pQE&_nc_ht=scontent.xx&oh=fff7a12b0243ecc92c367ac0139f03bc&oe=5E1112A4", - "height": 720, - "width": 405, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/45627901_327088121439362_6904272001196621824_n.jpg?_nc_cat=1&_nc_oc=AQn6h63uzCboFTqDkVSVhwjW_rs_dZ6IZACYblps27vlp0Upu5U471nUelf5p_T-pQE&_nc_ht=scontent.xx&oh=d272a3fbde01b1f176662957b8461ead&oe=5E00F6EC", - }, - ], - "statistics": { - "actual": { - "likeCount": 3164, - "shareCount": 3183, - "commentCount": 234, - "loveCount": 893, - "wowCount": 19, - "hahaCount": 828, - "sadCount": 0, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 337, - "shareCount": 250, - "commentCount": 67, - "loveCount": 32, - "wowCount": 7, - "hahaCount": 46, - "sadCount": 4, - "angryCount": 3, - "thankfulCount": 0, - }, - }, - "account": { - "id": 48728, - "name": "Faves USA", - "handle": "thefavesusa", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A", - "subscriberCount": 6323373, - "url": "https://www.facebook.com/532854420074062", - "platform": "Facebook", - "platformId": "532854420074062", - "verified": True, - }, - "videoLengthMS": 34238, - }, - { - "id": 70161065649, - "platformId": "172526489431467_3227961787221240", - "platform": "Facebook", - "date": "2019-09-07 20:00:08", - "updated": "2019-09-08 00:24:24", - "type": "link", - "title": "Rob Reiner Declares: ‘The Impeachment Process Is About To Begin’ | Tea Party", - "caption": "teaparty.org", - "description": "Rob Reiner Declares: ‘The Impeachment Process Is About To Begin’ (Breitbart) – Hollywood actor-director Rob Reiner took to Twitter on Saturday and declared that “the impeachment process is about to begin,” against President Donald Trump. “The Impeachment process is about to begin. 
The ar...", - "message": "Rob Reiner Declares: ‘The Impeachment Process Is About To Begin’", - "expandedLinks": [ - { - "original": "http://ow.ly/thAc30puHYx", - "expanded": "https://www.teaparty.org/rob-reiner-declares-the-impeachment-process-is-about-to-begin-408805/", - } - ], - "link": "http://ow.ly/thAc30puHYx", - "postUrl": "https://www.facebook.com/teapartyorg/posts/3227961787221240", - "subscriberCount": 416823, - "score": 11.11111111111111, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQB-IgFpD1LsDy90&w=720&h=720&url=https%3A%2F%2Fwww.teaparty.org%2Fwp-content%2Fuploads%2F2019%2F09%2Freiner-new.jpg&cfs=1&_nc_hash=AQDAvJ6ay40Yyv9K", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBEtc-nGN1pXKKM&url=https%3A%2F%2Fwww.teaparty.org%2Fwp-content%2Fuploads%2F2019%2F09%2Freiner-new.jpg&_nc_hash=AQDV8GfnZCDBS9dx", - } - ], - "statistics": { - "actual": { - "likeCount": 8, - "shareCount": 15, - "commentCount": 194, - "loveCount": 0, - "wowCount": 1, - "hahaCount": 134, - "sadCount": 1, - "angryCount": 47, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 6, - "shareCount": 5, - "commentCount": 7, - "loveCount": 2, - "wowCount": 2, - "hahaCount": 5, - "sadCount": 1, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 370587, - "name": "Tea Party", - "handle": "teapartyorg", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10645152_1119529788064461_6831324369519464936_n.png?_nc_cat=109&_nc_oc=AQlYPwkxXVKsTPXKN2iEw6-kekm3w1t-TNKlGRez6lg5WNmUCadSHtPr1aKi7-vMXx0&_nc_ht=scontent.xx&oh=47cea2dd8d0821de871a1f427d4cc9c3&oe=5E051C4C", - "subscriberCount": 416797, - "url": "https://www.facebook.com/172526489431467", - "platform": "Facebook", - "platformId": "172526489431467", - "verified": True, - }, - }, - { - "id": 70157545859, - "platformId": "131459315949_10157157161525950", - "platform": "Facebook", - "date": "2019-09-07 18:59:47", - "updated": "2019-09-08 00:28:55", - "type": "video", - "caption": "cbsnews.com", - "description": " ", - "message": "President Trump has been slamming the media for the coverage of his claim that Alabama was in grave danger from Hurricane Dorian.", - "expandedLinks": [ - { - "original": "https://cbsn.ws/300OGXC", - "expanded": "https://www.cbsnews.com/news/noaa-backs-up-president-trump-claim-that-alabama-could-be-affected-by-hurricane-2019-09-07/?ftag=CNM-00-10aab6a&linkId=73317898", - } - ], - "link": "https://cbsn.ws/300OGXC", - "postUrl": "https://www.facebook.com/CBSNews/posts/10157157161525950", - "subscriberCount": 5892543, - "score": 11.091743119266056, - "media": [ - { - "type": "video", - "url": "https://public.vilynx.com/direct/8fc31712de713e0c34c55c4bce033614/dfa46269-dee8-47d5-a4d2-75d7e30ed087/pro69.viwindow.mp4", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCbyqBM5kDSW7D5&w=630&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F08%2F29%2F0a29d008-ae46-4e99-b300-93e27828e182%2Fthumbnail%2F1200x630%2F40ae14190bd7d89123364faa648de9c9%2Fgettyimages-1164696989.jpg&cfs=1&sx=323&sy=0&sw=630&sh=630&_nc_hash=AQBfp4rDK-YSkYIB", - "height": 630, - "width": 630, - "full": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQBoZQPh51tH66xT&w=1200&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F08%2F29%2F0a29d008-ae46-4e99-b300-93e27828e182%2Fthumbnail%2F1200x630%2F40ae14190bd7d89123364faa648de9c9%2Fgettyimages-1164696989.jpg&crop&sx=0&sy=0&sw=1200&sh=630&_nc_hash=AQDhDDBJSu3_Lwws", - }, - ], - "statistics": { - "actual": { - "likeCount": 362, - "shareCount": 160, - "commentCount": 710, - "loveCount": 28, - "wowCount": 33, - "hahaCount": 674, - "sadCount": 35, - "angryCount": 416, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 76, - "shareCount": 33, - "commentCount": 46, - "loveCount": 8, - "wowCount": 11, - "hahaCount": 19, - "sadCount": 16, - "angryCount": 9, - "thankfulCount": 0, - }, - }, - "account": { - "id": 14655, - "name": "CBS News", - "handle": "CBSNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB", - "subscriberCount": 5892766, - "url": "https://www.facebook.com/131459315949", - "platform": "Facebook", - "platformId": "131459315949", - "verified": True, - }, - }, - { - "id": 70174483693, - "platformId": "273864989376427_2990274061068826", - "platform": "Facebook", - "date": "2019-09-07 23:57:09", - "updated": "2019-09-08 00:44:08", - "type": "link", - "title": "Trump says he was set to hold secret talks with Taliban at Camp David in the US", - "caption": "nbcnews.com", - "description": " ", - "message": "BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://on.msnbc.com/2LB1dvs", - "expandedLinks": [ - { - "original": "https://on.msnbc.com/2LB1dvs", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_ms_fb_ma&fbclid=IwAR1hvIf0wom7aKl4oj50ODjDPVtW24tM42WDeAzbY4olTUyN3dg3nUdQ3CI", - }, - { - "original": "https://on.msnbc.com/2LB1dvs", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_ms_fb_ma&fbclid=IwAR1hvIf0wom7aKl4oj50ODjDPVtW24tM42WDeAzbY4olTUyN3dg3nUdQ3CI", - }, - ], - "link": "https://on.msnbc.com/2LB1dvs", - "postUrl": "https://www.facebook.com/msnbc/posts/2990274061068826", - "subscriberCount": 2290452, - "score": 11.05019305019305, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 136, - "shareCount": 430, - "commentCount": 961, - "loveCount": 7, - "wowCount": 117, - "hahaCount": 765, - "sadCount": 15, - "angryCount": 431, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 61, - "shareCount": 33, - "commentCount": 67, - "loveCount": 5, - "wowCount": 10, - "hahaCount": 23, - "sadCount": 16, - "angryCount": 44, - "thankfulCount": 0, - }, - }, - "account": { - "id": 8324, - "name": "MSNBC", - "handle": "msnbc", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2", - "subscriberCount": 2290512, - "url": "https://www.facebook.com/273864989376427", - "platform": "Facebook", - "platformId": "273864989376427", - "verified": True, - }, - }, - { - "id": 70157512987, - "platformId": "167115176655082_2994611437238761", - "platform": "Facebook", - "date": "2019-09-07 19:00:10", - "updated": "2019-09-08 00:25:19", - "type": "link", - "title": "The First People to See 'Joker' Say It's Absolutely Incredible", - "caption": "vice.com", - "description": '"It\'s a masterpiece."', - "message": "Wow.", - "expandedLinks": [ - { - "original": "https://www.vice.com/en_us/article/43kw9m/the-first-people-to-see-joker-say-its-absolutely-incredible?utm_source=vicefbus", - "expanded": "https://www.vice.com/en_us/article/43kw9m/the-first-people-to-see-joker-say-its-absolutely-incredible?utm_source=vicefbus", - } - ], - "link": "https://www.vice.com/en_us/article/43kw9m/the-first-people-to-see-joker-say-its-absolutely-incredible?utm_source=vicefbus", - "postUrl": "https://www.facebook.com/VICE/posts/2994611437238761", - "subscriberCount": 8174144, - "score": 10.957142857142857, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQAnGhBeKGypWT01&w=674&h=674&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d689eaf5bd4cf000aabe8f6%2Flede%2F1567344325767-Screen-Shot-2019-09-01-at-92510-AM.png%3Fcrop%3D0.7819xw%3A0.8236xh%3B0.1156xw%2C0.0612xh%26resize%3D1200%3A%2A&cfs=1&sx=298&sy=0&sw=674&sh=674&_nc_hash=AQDAKv3kHukpFn1V", - "height": 674, - "width": 674, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQA8VrX1m3AEk4av&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d689eaf5bd4cf000aabe8f6%2Flede%2F1567344325767-Screen-Shot-2019-09-01-at-92510-AM.png%3Fcrop%3D0.7819xw%3A0.8236xh%3B0.1156xw%2C0.0612xh%26resize%3D1200%3A%2A&_nc_hash=AQBrUKcye7gfy7Yr", - } - ], - "statistics": { - "actual": { - "likeCount": 1104, - "shareCount": 102, - "commentCount": 181, - "loveCount": 117, - "wowCount": 23, - "hahaCount": 7, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 57, - "shareCount": 21, - "commentCount": 30, - "loveCount": 6, - "wowCount": 7, - "hahaCount": 12, - "sadCount": 3, - "angryCount": 4, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6646, - "name": "VICE", - "handle": "VICE", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13427861_1304295039603751_2178102892370936049_n.jpg?_nc_cat=1&_nc_oc=AQmzoEUjC5BCCMVSsIFvWa52KGr3Iqh9f0Y_eezqYMFw7h_EUam7WQdYxEFvJB6LoP0&_nc_ht=scontent.xx&oh=847f8eb6c5132c90382bc0940afbc692&oe=5E02C5BA", - "subscriberCount": 8177544, - "url": "https://www.facebook.com/167115176655082", - "platform": "Facebook", - "platformId": "167115176655082", - "verified": True, - }, - }, - { - "id": 70157378615, - "platformId": "189885532970_10157980386047971", - "platform": "Facebook", - "date": "2019-09-07 19:00:00", - "updated": "2019-09-08 00:18:45", - "type": "link", - "title": "'Eat Less Meat': 2020 Democrats Mull Changing Laws on Meat Consumption to Fight Climate Change", - "caption": "ijr.com", - "description": "Several of the 2020 Democratic presidential contenders took to the stage of Thursday night to lay out their plan to address climate change, and in doing so, some of them mulled ideas of changing laws surrounding meat consumption.", - "message": "In case you missed it:", - "expandedLinks": [ - { - "original": "https://ijr.com/2020-democrats-mull-changing-laws-meat-consumption-climate-change/", - "expanded": "https://ijr.com/2020-democrats-mull-changing-laws-meat-consumption-climate-change/", - } - ], - "link": "https://ijr.com/2020-democrats-mull-changing-laws-meat-consumption-climate-change/", - "postUrl": "https://www.facebook.com/IJRRed/posts/10157980386047971", - "subscriberCount": 8532193, - "score": 10.8932527693857, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQACOVQMpvwNJjWm&w=450&h=450&url=https%3A%2F%2F242358-745360-raikfcquaxqncofqfm.stackpathdns.com%2Fwp-content%2Fuploads%2F2019%2F09%2Fphotomix-image-2019-09-05T112755.284.jpg&cfs=1&sx=183&sy=0&sw=450&sh=450&_nc_hash=AQCY2wd2lhyIQTks", - "height": 450, - "width": 450, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAoypPZmdrTpzAQ&url=https%3A%2F%2F242358-745360-raikfcquaxqncofqfm.stackpathdns.com%2Fwp-content%2Fuploads%2F2019%2F09%2Fphotomix-image-2019-09-05T112755.284.jpg&_nc_hash=AQCh4NFb432qR6pj", - } - ], - "statistics": { - "actual": { - "likeCount": 421, - "shareCount": 3158, - "commentCount": 2513, - "loveCount": 8, - "wowCount": 258, - "hahaCount": 2667, - "sadCount": 84, - "angryCount": 1708, - "thankfulCount": 0, - }, - 
"expected": { - "likeCount": 400, - "shareCount": 120, - "commentCount": 258, - "loveCount": 15, - "wowCount": 21, - "hahaCount": 110, - "sadCount": 11, - "angryCount": 58, - "thankfulCount": 0, - }, - }, - "account": { - "id": 30245, - "name": "IJR Red", - "handle": "IJRRed", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/23376285_10156265164197971_2450414612163288246_n.jpg?_nc_cat=1&_nc_oc=AQm4KDy-Qmj38dJbaAQ0KXPVdY94zu7JBQAIUkAO2_W0uRWIl-5aI18nffFvxZoVICg&_nc_ht=scontent.xx&oh=ab7b4676afa9874079a36c20150411f5&oe=5E0C3B40", - "subscriberCount": 8531658, - "url": "https://www.facebook.com/189885532970", - "platform": "Facebook", - "platformId": "189885532970", - "verified": True, - }, - }, - { - "id": 70164096778, - "platformId": "709959352435346_2345707418860523", - "platform": "Facebook", - "date": "2019-09-07 21:11:27", - "updated": "2019-09-08 00:41:40", - "type": "photo", - "message": '“Congratulations to the University of Nebraska on being the first school in the nation with home stadiums in both the Big Ten and the Pac-12." - Ben Sasse', - "expandedLinks": [ - { - "original": "https://www.facebook.com/SenatorSasse/photos/a.730973980333883/2345706642193934/?type=3", - "expanded": "https://www.facebook.com/SenatorSasse/photos/a.730973980333883/2345706642193934/?type=3", - } - ], - "link": "https://www.facebook.com/SenatorSasse/photos/a.730973980333883/2345706642193934/?type=3", - "postUrl": "https://www.facebook.com/SenatorSasse/posts/2345707418860523", - "subscriberCount": 47255, - "score": 10.714285714285714, - "media": [ - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69639380_2345706645527267_3280132400163586048_o.jpg?_nc_cat=101&_nc_oc=AQnRAbvB5XIVIOEbeUHM6drPHgfaP4ShhF7VuYpxbXGE3wfw1GONw_NvPgJGwAFJeoE&_nc_ht=scontent.xx&oh=026b7c097cfa8b96018a93274a1c289f&oe=5E150C5D", - "height": 432, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69639380_2345706645527267_3280132400163586048_o.jpg?_nc_cat=101&_nc_oc=AQnRAbvB5XIVIOEbeUHM6drPHgfaP4ShhF7VuYpxbXGE3wfw1GONw_NvPgJGwAFJeoE&_nc_ht=scontent.xx&oh=026b7c097cfa8b96018a93274a1c289f&oe=5E150C5D", - } - ], - "statistics": { - "actual": { - "likeCount": 766, - "shareCount": 385, - "commentCount": 67, - "loveCount": 141, - "wowCount": 6, - "hahaCount": 358, - "sadCount": 1, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 90, - "shareCount": 8, - "commentCount": 13, - "loveCount": 32, - "wowCount": 3, - "hahaCount": 3, - "sadCount": 7, - "angryCount": 5, - "thankfulCount": 0, - }, - }, - "account": { - "id": 124328, - "name": "Senator Ben Sasse", - "handle": "SenatorSasse", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/32511931_1615949251836347_83407049412575232_n.jpg?_nc_cat=1&_nc_oc=AQmcObZPPXWZnOrR03LfCfbo0_O7SZ4plnkjx56iJ4PDvv4E3SP7GVUSS9glUE4IAbo&_nc_ht=scontent.xx&oh=572cd1c7d7bca987cd593514df1d6afb&oe=5DCA9FE3", - "subscriberCount": 47281, - "url": "https://www.facebook.com/709959352435346", - "platform": "Facebook", - "platformId": "709959352435346", - "verified": True, - }, - }, - { - "id": 70160675221, - "platformId": "13312631635_10157392187846636", - "platform": "Facebook", - "date": "2019-09-07 20:01:21", - "updated": "2019-09-08 00:42:43", - "type": "link", - "title": "Intelligent people tend to be messy, stay awake longer, and swear more", - "caption": "independent.co.uk", - "description": "If you think about it, those who don't use any swear words are the ones who limit their vocabulary", - 
"message": "Just in case you forgot", - "expandedLinks": [ - { - "original": "http://www.independent.co.uk/news/science/intelligent-people-tend-to-be-messy-stay-awake-longer-and-swear-more-a7174256.html?utm_medium=Social&utm_source=Facebook#Echobox=1567847014", - "expanded": "https://www.independent.co.uk/news/science/intelligent-people-tend-to-be-messy-stay-awake-longer-and-swear-more-a7174256.html?utm_medium=Social&utm_source=Facebook", - } - ], - "link": "http://www.independent.co.uk/news/science/intelligent-people-tend-to-be-messy-stay-awake-longer-and-swear-more-a7174256.html?utm_medium=Social&utm_source=Facebook#Echobox=1567847014", - "postUrl": "https://www.facebook.com/TheIndependentOnline/posts/10157392187846636", - "subscriberCount": 8832865, - "score": 10.694656488549619, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDTI-q2CoWnVI6v&w=720&h=720&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2015%2F10%2F13%2F14%2FJennifer-Lawrence.jpg&cfs=1&sx=246&sy=0&sw=1000&sh=1000&_nc_hash=AQBwG9rCHx8nZd9s", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQA5nGOIbrLuuzOd&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2015%2F10%2F13%2F14%2FJennifer-Lawrence.jpg&_nc_hash=AQDlXAqho5KxD282", - } - ], - "statistics": { - "actual": { - "likeCount": 1255, - "shareCount": 722, - "commentCount": 486, - "loveCount": 122, - "wowCount": 9, - "hahaCount": 205, - "sadCount": 2, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 90, - "shareCount": 44, - "commentCount": 58, - "loveCount": 9, - "wowCount": 10, - "hahaCount": 27, - "sadCount": 8, - "angryCount": 16, - "thankfulCount": 0, - }, - }, - "account": { - "id": 19065, - "name": "The Independent", - "handle": "TheIndependentOnline", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11051795_10152732082756636_6705742038347351188_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQmApCC_log9_TfPU5-TLVRKHyBo2YH6UPG2d6R-43r5u7HhElr7QPKk9J_AXR9q1Ac&_nc_ht=scontent.xx&oh=47ac79067cb2e33520f6920eb409611d&oe=5E0FED75", - "subscriberCount": 8834731, - "url": "https://www.facebook.com/13312631635", - "platform": "Facebook", - "platformId": "13312631635", - "verified": True, - }, - }, - { - "id": 70167308119, - "platformId": "134486075205_10163968846050206", - "platform": "Facebook", - "date": "2019-09-07 22:06:05", - "updated": "2019-09-08 00:43:59", - "type": "link", - "title": "Fisherman performs C-section on dead pregnant shark, pulls out 98 live pups", - "caption": "nypost.com", - "description": "This is the incredible moment a fisherman performs a C-section on a dead shark – and releases 98 live babies back into the wild. 
Mathew Orlov carried out the impromptu operation when he realized th…", - "message": '“When I saw the belly moving, instinct kicked in."', - "expandedLinks": [ - { - "original": "https://trib.al/LgggOVZ", - "expanded": "https://nypost.com/2018/04/13/fisherman-pulls-98-live-babies-out-of-dead-shark/?utm_medium=SocialFlow&sr_share=facebook&utm_source=NYPFacebook&utm_campaign=SocialFlow&fbclid=IwAR3o967WReYiKfY2dLOU9zIaEcsGgLAy5p9d0pU-GiXMLnzc8ZZ7Pe5CSxQ", - } - ], - "link": "https://trib.al/LgggOVZ", - "postUrl": "https://www.facebook.com/NYPost/posts/10163968846050206", - "subscriberCount": 4182920, - "score": 10.654450261780104, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBUGloTZw1nxKQg&w=720&h=720&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2018%2F04%2F180413-shark-c-section-dead-shark-index.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&cfs=1&_nc_hash=AQBnfnyTsHsawpB1", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBwye2XJlVhygAX&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2018%2F04%2F180413-shark-c-section-dead-shark-index.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&_nc_hash=AQDS28jMlElJ1X8_", - } - ], - "statistics": { - "actual": { - "likeCount": 704, - "shareCount": 567, - "commentCount": 78, - "loveCount": 166, - "wowCount": 482, - "hahaCount": 6, - "sadCount": 28, - "angryCount": 4, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 48, - "shareCount": 42, - "commentCount": 47, - "loveCount": 5, - "wowCount": 12, - "hahaCount": 20, - "sadCount": 8, - "angryCount": 9, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10342, - "name": "New York Post", - "handle": "NYPost", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12932928_10157483552025206_1176575955706691041_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnPmbZuC7S1v1NTPRZ7rWQU4EucwAW3nKx-aXD0PzlPsD3ifQpdaLcXEegH730Wy_o&_nc_ht=scontent.xx&oh=c77d86309611fa2972df1979bf6cab9e&oe=5E0827CA", - "subscriberCount": 4183079, - "url": "https://www.facebook.com/134486075205", - "platform": "Facebook", - "platformId": "134486075205", - "verified": True, - }, - }, - { - "id": 70159375367, - "platformId": "5281959998_10152009926804999", - "platform": "Facebook", - "date": "2019-09-07 19:40:00", - "updated": "2019-09-08 00:47:27", - "type": "link", - "title": "A Virginia University Offers Free Semester to Students in Bahamas Displaced by Hurricane", - "caption": "nytimes.com", - "description": "In the wake of Hurricane Dorian, Hampton University will open its doors to students from the University of the Bahamas.", - "message": "Hampton University, a historically black institution in Virginia, is offering a semester of free room, board and tuition to University of the Bahamas students affected by Hurricane Dorian.", - "expandedLinks": [ - { - "original": "https://www.nytimes.com/2019/09/07/us/hampton-university-bahamas.html?smid=fb-nytimes&smtyp=cur", - "expanded": "https://www.nytimes.com/2019/09/07/us/hampton-university-bahamas.html?smid=fb-nytimes&smtyp=cur", - } - ], - "link": "https://www.nytimes.com/2019/09/07/us/hampton-university-bahamas.html?smid=fb-nytimes&smtyp=cur", - "postUrl": "https://www.facebook.com/nytimes/posts/10152009926804999", - "subscriberCount": 16854203, - "score": 10.245652173913044, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQC3W5MeqRKPesIZ&w=720&h=720&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F08%2Fmultimedia%2F08xp-hampton%2F05xp-hampton-facebookJumbo.jpg&cfs=1&_nc_hash=AQCcTlBbi1yooU7X", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBVcBFba0vlBYuZ&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F08%2Fmultimedia%2F08xp-hampton%2F05xp-hampton-facebookJumbo.jpg&_nc_hash=AQBVEUeUjhFuwrpM", - } - ], - "statistics": { - "actual": { - "likeCount": 3250, - "shareCount": 417, - "commentCount": 81, - "loveCount": 897, - "wowCount": 64, - "hahaCount": 3, - "sadCount": 0, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 198, - "shareCount": 70, - "commentCount": 89, - "loveCount": 14, - "wowCount": 21, - "hahaCount": 27, - "sadCount": 19, - "angryCount": 22, - "thankfulCount": 0, - }, - }, - "account": { - "id": 7132, - "name": "The New York Times", - "handle": "nytimes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57", - "subscriberCount": 16854715, - "url": "https://www.facebook.com/5281959998", - "platform": "Facebook", - "platformId": "5281959998", - "verified": True, - }, - }, - { - "id": 70167645982, - "platformId": "131459315949_10157157628655950", - "platform": "Facebook", - "date": "2019-09-07 22:19:43", - "updated": "2019-09-08 00:28:55", - "type": "video", - "caption": "cbsnews.com", - "description": " ", - "message": "The vote signifies an escalation of the inquiry into whether President Trump should be impeached.", - "expandedLinks": [ - { - "original": "https://cbsn.ws/34ucx5h", - "expanded": "https://www.cbsnews.com/news/house-judiciary-committee-to-vote-on-defining-impeachment-inquiry-2019-09-07/?ftag=CNM-00-10aab6a&linkId=73322081", - } - ], - "link": "https://cbsn.ws/34ucx5h", - "postUrl": "https://www.facebook.com/CBSNews/posts/10157157628655950", - "subscriberCount": 5892543, - "score": 10.166666666666666, - "media": [ - { - "type": "video", - "url": "https://public.vilynx.com/direct/8fc31712de713e0c34c55c4bce033614/65d47c3b-5c2f-413e-b8a2-11c147162256/pro69.viwindow.mp4", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBW8RXTe_M1XwuG&w=630&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F07%2F24%2F16269e5d-742b-4d25-bf3e-ab7e89bff231%2Fthumbnail%2F1200x630%2Fc0fbe34dbb7b09ee3a51a3732fb0568e%2Fcbsn-fusion-cbs-news-special-report-opening-statement-jerry-nadler-thumbnail-1897399-640x360.jpg&cfs=1&sx=288&sy=0&sw=630&sh=630&_nc_hash=AQC7qXK3Rnuf8cKx", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCfVhSfr4ER8ue6&w=1200&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F07%2F24%2F16269e5d-742b-4d25-bf3e-ab7e89bff231%2Fthumbnail%2F1200x630%2Fc0fbe34dbb7b09ee3a51a3732fb0568e%2Fcbsn-fusion-cbs-news-special-report-opening-statement-jerry-nadler-thumbnail-1897399-640x360.jpg&crop&sx=0&sy=0&sw=1200&sh=630&_nc_hash=AQC5DoCNSb-UglIh", - }, - ], - "statistics": { - "actual": { - "likeCount": 828, - "shareCount": 161, - "commentCount": 395, - "loveCount": 181, - "wowCount": 13, - "hahaCount": 243, - "sadCount": 5, - "angryCount": 65, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 63, 
- "shareCount": 27, - "commentCount": 43, - "loveCount": 8, - "wowCount": 9, - "hahaCount": 14, - "sadCount": 14, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 14655, - "name": "CBS News", - "handle": "CBSNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB", - "subscriberCount": 5892766, - "url": "https://www.facebook.com/131459315949", - "platform": "Facebook", - "platformId": "131459315949", - "verified": True, - }, - }, - { - "id": 70169322403, - "platformId": "131459315949_10157157685470950", - "platform": "Facebook", - "date": "2019-09-07 22:39:50", - "updated": "2019-09-08 00:28:55", - "type": "video", - "caption": "cbsnews.com", - "description": " ", - "message": "Brody has autism and is nonverbal, but when he laid his head in Snow White's lap, she knew just what to do.", - "expandedLinks": [ - { - "original": "https://cbsn.ws/2ZVMfFt", - "expanded": "https://www.cbsnews.com/news/snow-white-comforts-boy-with-autism-who-had-a-meltdown-in-disney-world/?ftag=CNM-00-10aab6a&linkId=73322490", - } - ], - "link": "https://cbsn.ws/2ZVMfFt", - "postUrl": "https://www.facebook.com/CBSNews/posts/10157157685470950", - "subscriberCount": 5892543, - "score": 10.161111111111111, - "media": [ - { - "type": "video", - "url": "https://public.vilynx.com/direct/8fc31712de713e0c34c55c4bce033614/e9d4b8d2-3cdd-4db6-a57a-6cece6c6105f/pro69.viwindow.mp4", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBLzNfU6X0rpLci&w=630&h=630&url=https%3A%2F%2Fcbsnews1.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F09%2F04%2F81484701-f4ca-41be-9857-d7791a492ff4%2Fthumbnail%2F1200x630%2F47c61872eff69cc27da4106e169e7363%2Funtitled-collage-3.jpg&cfs=1&sx=43&sy=0&sw=630&sh=630&_nc_hash=AQDXFwpknxKfOMPn", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDGGpJ32waOZB6k&w=1200&h=630&url=https%3A%2F%2Fcbsnews1.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F09%2F04%2F81484701-f4ca-41be-9857-d7791a492ff4%2Fthumbnail%2F1200x630%2F47c61872eff69cc27da4106e169e7363%2Funtitled-collage-3.jpg&crop&sx=0&sy=0&sw=1200&sh=630&_nc_hash=AQAvhhzs725BcBMw", - }, - ], - "statistics": { - "actual": { - "likeCount": 763, - "shareCount": 413, - "commentCount": 37, - "loveCount": 582, - "wowCount": 5, - "hahaCount": 1, - "sadCount": 28, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 60, - "shareCount": 26, - "commentCount": 42, - "loveCount": 8, - "wowCount": 9, - "hahaCount": 13, - "sadCount": 14, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 14655, - "name": "CBS News", - "handle": "CBSNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB", - "subscriberCount": 5892766, - "url": "https://www.facebook.com/131459315949", - "platform": "Facebook", - "platformId": "131459315949", - "verified": True, - }, - }, - { - "id": 70172671434, - "platformId": "268914272540_10156464486172541", - "platform": "Facebook", - "date": "2019-09-07 23:40:03", - "updated": "2019-09-08 00:30:46", - "type": 
"link", - "title": "EMT dad responds to fatal accident of country star Kylie Rae Harris, finds out daughter was the victim", - "caption": "nydailynews.com", - "description": "The EMT father of Maria Elena Cruz, the teen who fatally collided with country singer Kylie Rae Harris on Wednesday responded to the scene unaware that one of the victims was his daughter.", - "message": "Maria Elena Cruz, 16, who died in a collision with country singer Kylie Rae Harris was treated at the scene by her EMT father. Her father, Pedro Cruz, responded to the deadly accident completely unaware that his daughter was one of his victims.", - "expandedLinks": [ - { - "original": "https://trib.al/jNmLead", - "expanded": "https://www.nydailynews.com/news/crime/ny-kylie-rae-harris-and-victims-dad-20190907-exgtk5a4q5c33fm47gdbaafi6a-story.html?fbclid=IwAR350lwuh3az3OjnIMzY99g3KW5gT8eFuRNMtPacg2AGSn3VFJ5kNSACvqo&fbclid=IwAR0zXLqmmFONyYeaFgWirxx_FECHO_6iv3_bRcILozfgj8pnDFEtboIT1Ag", - } - ], - "link": "https://trib.al/jNmLead", - "postUrl": "https://www.facebook.com/NYDailyNews/posts/10156464486172541", - "subscriberCount": 3119682, - "score": 10.021739130434783, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAvv2wdaFi7vPsi&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F-iBBbg5wyisMtnRhms2BcExkoWU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FW3CGH3O27NGTLGX3E3XB7XF3WE.jpg&cfs=1&_nc_hash=AQCMHXpSpTA-E4Is", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDh0aeIZrN3K3GQ&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F-iBBbg5wyisMtnRhms2BcExkoWU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FW3CGH3O27NGTLGX3E3XB7XF3WE.jpg&_nc_hash=AQDrpc39QwLCuMCx", - } - ], - "statistics": { - "actual": { - "likeCount": 79, - "shareCount": 188, - "commentCount": 68, - "loveCount": 4, - "wowCount": 50, - "hahaCount": 0, - "sadCount": 531, - "angryCount": 2, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 22, - "shareCount": 19, - "commentCount": 16, - "loveCount": 4, - "wowCount": 7, - "hahaCount": 10, - "sadCount": 6, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 18752, - "name": "New York Daily News", - "handle": "NYDailyNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1", - "subscriberCount": 3120017, - "url": "https://www.facebook.com/268914272540", - "platform": "Facebook", - "platformId": "268914272540", - "verified": True, - }, - }, - { - "id": 70158925469, - "platformId": "37763684202_10157814715809203", - "platform": "Facebook", - "date": "2019-09-07 19:16:53", - "updated": "2019-09-08 00:41:24", - "type": "link", - "title": "MIT Media Lab Director Joi Ito Resigns Amid New Jeffrey Epstein Revelations", - "caption": "thedailybeast.com", - "description": "The move comes a day after the New Yorker revealed that Media Lab’s financial relationship to Jeffrey Epstein was more deeply entangled than previously known.", - "message": "BREAKING: MIT Media Lab Director Joi Ito resigns amid new Jeffrey Epstein revelations", - "expandedLinks": [ - { - "original": "https://trib.al/WI1hIej", - "expanded": 
"https://www.thedailybeast.com/mit-media-lab-director-joi-ito-resigns-amid-new-jeffrey-epstein-revelations?via=FB_Page&source=TDB", - } - ], - "link": "https://trib.al/WI1hIej", - "postUrl": "https://www.facebook.com/thedailybeast/posts/10157814715809203", - "subscriberCount": 2163205, - "score": 9.9438202247191, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQByuRoL0gP8gqu8&w=720&h=720&url=https%3A%2F%2Fimg.thedailybeast.com%2Fimage%2Fupload%2Fc_crop%2Cd_placeholder_euli9k%2Ch_2777%2Cw_4938%2Cx_0%2Cy_0%2Fdpr_2.0%2Fc_limit%2Cw_740%2Ffl_lossy%2Cq_auto%2Fv1567883633%2FGettyImages-1052075272_zwmkfa&cfs=1&_nc_hash=AQBXZ5IEfkt2M0LV", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQA_kBIaNfGxPLto&url=https%3A%2F%2Fimg.thedailybeast.com%2Fimage%2Fupload%2Fc_crop%2Cd_placeholder_euli9k%2Ch_2777%2Cw_4938%2Cx_0%2Cy_0%2Fdpr_2.0%2Fc_limit%2Cw_740%2Ffl_lossy%2Cq_auto%2Fv1567883633%2FGettyImages-1052075272_zwmkfa&_nc_hash=AQDcf_ay2hmMa-1u", - } - ], - "statistics": { - "actual": { - "likeCount": 303, - "shareCount": 284, - "commentCount": 38, - "loveCount": 8, - "wowCount": 192, - "hahaCount": 48, - "sadCount": 1, - "angryCount": 11, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 19, - "shareCount": 11, - "commentCount": 14, - "loveCount": 6, - "wowCount": 6, - "hahaCount": 14, - "sadCount": 7, - "angryCount": 12, - "thankfulCount": 0, - }, - }, - "account": { - "id": 7781, - "name": "The Daily Beast", - "handle": "thedailybeast", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18447180_10155420999849203_1942956350622474660_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlsvWaYHxyRC2B3NwwmVoV1kpqGNvYkkSxSr_lFopmdwhj-uerxTWu7CmbWz-8Qq-Q&_nc_ht=scontent.xx&oh=86caf840e49b739e6381c591317aab4b&oe=5DC85150", - "subscriberCount": 2163118, - "url": "https://www.facebook.com/37763684202", - "platform": "Facebook", - "platformId": "37763684202", - "verified": True, - }, - }, - { - "id": 70165436882, - "platformId": "182919686769_10156515005471770", - "platform": "Facebook", - "date": "2019-09-07 21:30:07", - "updated": "2019-09-08 00:40:44", - "type": "link", - "title": "Bernie Sanders Says Abortion Will Help Fight Climate Change", - "caption": "dailycaller.com", - "description": "Bernie Sanders Says Abortion Will Help Fight Climate Change", - "message": "Say what?!", - "expandedLinks": [ - { - "original": "https://dailycaller.com/2019/09/04/bernie-sanders-abortion-climate-change/", - "expanded": "https://dailycaller.com/2019/09/04/bernie-sanders-abortion-climate-change/", - } - ], - "link": "https://dailycaller.com/2019/09/04/bernie-sanders-abortion-climate-change/", - "postUrl": "https://www.facebook.com/DailyCaller/posts/10156515005471770", - "subscriberCount": 5408428, - "score": 9.84862385321101, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBAeviXgVv1qTKI&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f0adaa53100da2660e23%2F5351ecff4ddf98a16d14fb0334779a1b0dd0b176_49eaacdfd17f2229c1f6ce7274e4f7a6cd27f870_facebook&cfs=1&_nc_hash=AQCW4ug6vLqDQNsT", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAyo-47Q_tEYqem&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f0adaa53100da2660e23%2F5351ecff4ddf98a16d14fb0334779a1b0dd0b176_49eaacdfd17f2229c1f6ce7274e4f7a6cd27f870_facebook&_nc_hash=AQD4lc3SzFdaf30j", - } - ], - "statistics": { - "actual": { - "likeCount": 221, - 
"shareCount": 989, - "commentCount": 1329, - "loveCount": 1, - "wowCount": 138, - "hahaCount": 449, - "sadCount": 114, - "angryCount": 1053, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 133, - "shareCount": 64, - "commentCount": 121, - "loveCount": 9, - "wowCount": 16, - "hahaCount": 48, - "sadCount": 8, - "angryCount": 37, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13489, - "name": "The Daily Caller", - "handle": "DailyCaller", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471", - "subscriberCount": 5408115, - "url": "https://www.facebook.com/182919686769", - "platform": "Facebook", - "platformId": "182919686769", - "verified": True, - }, - }, - { - "id": 70158720115, - "platformId": "5281959998_10152009911414999", - "platform": "Facebook", - "date": "2019-09-07 19:25:00", - "updated": "2019-09-08 00:47:27", - "type": "link", - "title": "Two Men Kiss in a Comic Book, and a Mayor Orders a Raid", - "caption": "nytimes.com", - "description": "The raid drew a backlash from festival organizers, publishing houses, comedians and, finally, a Brazilian court, which barred the mayor from further seizure efforts.", - "message": "The mayor of Rio de Janeiro ordered law enforcement agents to raid the city's International Book Fair and seize copies of a comic book that featured 2 men kissing. The festival's organizers took him to court and won.", - "expandedLinks": [ - { - "original": "https://www.nytimes.com/2019/09/07/world/americas/rio-gay-kiss-comic.html?smid=fb-nytimes&smtyp=cur", - "expanded": "https://www.nytimes.com/2019/09/07/world/americas/rio-gay-kiss-comic.html?smid=fb-nytimes&smtyp=cur", - } - ], - "link": "https://www.nytimes.com/2019/09/07/world/americas/rio-gay-kiss-comic.html?smid=fb-nytimes&smtyp=cur", - "postUrl": "https://www.facebook.com/nytimes/posts/10152009911414999", - "subscriberCount": 16854203, - "score": 9.691304347826087, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBVIgoXJNg7nE2M&w=550&h=550&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fworld%2F07brazil-kiss%2F07brazil-kiss-facebookJumbo.jpg&cfs=1&sx=500&sy=0&sw=550&sh=550&_nc_hash=AQD90hjbBkmgAi-z", - "height": 550, - "width": 550, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCX5Hsk1zs-QnL_&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fworld%2F07brazil-kiss%2F07brazil-kiss-facebookJumbo.jpg&_nc_hash=AQAnEh_d0w0PqNut", - } - ], - "statistics": { - "actual": { - "likeCount": 1684, - "shareCount": 771, - "commentCount": 289, - "loveCount": 83, - "wowCount": 263, - "hahaCount": 473, - "sadCount": 107, - "angryCount": 788, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 198, - "shareCount": 70, - "commentCount": 89, - "loveCount": 14, - "wowCount": 21, - "hahaCount": 27, - "sadCount": 19, - "angryCount": 22, - "thankfulCount": 0, - }, - }, - "account": { - "id": 7132, - "name": "The New York Times", - "handle": "nytimes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57", - "subscriberCount": 16854715, - "url": "https://www.facebook.com/5281959998", - 
"platform": "Facebook", - "platformId": "5281959998", - "verified": True, - }, - }, - { - "id": 70168080694, - "platformId": "8304333127_10159038883103128", - "platform": "Facebook", - "date": "2019-09-07 22:22:37", - "updated": "2019-09-08 00:44:09", - "type": "link", - "title": "Whoa Canada! Bianca Andreescu Captures the U.S. Open Title", - "caption": "wsj.com", - "description": "Bianca Andreescu, a 19-year-old rising star from Canada, defeated Serena Williams 6-3, 7-5 to win the 2019 U.S. Open women’s singles title.", - "message": "In a blowout that got tense late, 19-year-old rising star Bianca Andreescu defeated her idol to win Canada's first major tennis title.", - "expandedLinks": [ - { - "original": "https://on.wsj.com/34yi0YH", - "expanded": "https://www.wsj.com/articles/bianca-andreescu-wins-the-u-s-open-11567893664?mod=e2fb", - } - ], - "link": "https://on.wsj.com/34yi0YH", - "postUrl": "https://www.facebook.com/wsj/posts/10159038883103128", - "subscriberCount": 6360114, - "score": 9.640625, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQARwY3pIH7bBdxO&w=720&h=720&url=https%3A%2F%2Fimages.wsj.net%2Fim-105055%2Fsocial&cfs=1&_nc_hash=AQB51kJj_a5pmM8l", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAPkeH1BgXOxCNQ&url=https%3A%2F%2Fimages.wsj.net%2Fim-105055%2Fsocial&_nc_hash=AQAGTlQG2xuXvh_F", - } - ], - "statistics": { - "actual": { - "likeCount": 434, - "shareCount": 38, - "commentCount": 52, - "loveCount": 61, - "wowCount": 24, - "hahaCount": 3, - "sadCount": 4, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 26, - "shareCount": 8, - "commentCount": 9, - "loveCount": 3, - "wowCount": 3, - "hahaCount": 8, - "sadCount": 3, - "angryCount": 4, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10335, - "name": "The Wall Street Journal", - "handle": "wsj", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26734229_10157192613173128_6286097899182572387_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkg3dR3V2rO72fdcQNc6Kdupv3fYH3-VXio9SvAwKULEi36QT0vhIKN0_FvohpQCGs&_nc_ht=scontent.xx&oh=f550584e1e7adab86d889e32b7468801&oe=5DFE7FE9", - "subscriberCount": 6360356, - "url": "https://www.facebook.com/8304333127", - "platform": "Facebook", - "platformId": "8304333127", - "verified": True, - }, - }, - { - "id": 70158010147, - "platformId": "56845382910_10157286969462911", - "platform": "Facebook", - "date": "2019-09-07 19:05:44", - "updated": "2019-09-08 00:45:25", - "type": "link", - "title": "Laura Ingraham Tries To Drink Light Bulb-Stuffed Steak To 'Trigger' Liberals", - "caption": "huffpost.com", - "description": "Spoiler alert: She learned she can't.", - "message": "The Fox News host's earnest efforts to troll liberals reached a new, if not bizarre, level.", - "expandedLinks": [ - { - "original": "http://huffp.st/ePhzz9l", - "expanded": "https://www.huffpost.com/entry/laura-ingraham-tries-to-drink-light-bulb-steak-to-trigger-liberals_n_5d73c962e4b0fde50c2740cd?utm_campaign=hp_fb_pages&utm_source=politics_fb&ncid=fcbklnkushpmg00000013&utm_medium=facebook§ion=politics&fbclid=IwAR3r_8e8E48MWt0sKmt1Qh3SNXit6km5u1xbqTYhSE1dt3cvbQwO44vYIvc", - } - ], - "link": "http://huffp.st/ePhzz9l", - "postUrl": "https://www.facebook.com/HuffPostPolitics/posts/10157286969462911", - "subscriberCount": 2107913, - "score": 9.410468319559229, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQDGta8H68A0fkUr&w=720&h=720&url=https%3A%2F%2Fimg.huffingtonpost.com%2Fasset%2F5d73dc653b00002a74d0c177.jpeg%3Fops%3D1778_1000&cfs=1&sx=345&sy=0&sw=1000&sh=1000&_nc_hash=AQC0jKBN9GGCpmxA", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBadF6Ee2wYjtdm&url=https%3A%2F%2Fimg.huffingtonpost.com%2Fasset%2F5d73dc653b00002a74d0c177.jpeg%3Fops%3D1778_1000&_nc_hash=AQApk4tBx_BXnnZz", - } - ], - "statistics": { - "actual": { - "likeCount": 139, - "shareCount": 785, - "commentCount": 1213, - "loveCount": 8, - "wowCount": 153, - "hahaCount": 995, - "sadCount": 54, - "angryCount": 69, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 108, - "shareCount": 74, - "commentCount": 86, - "loveCount": 14, - "wowCount": 11, - "hahaCount": 34, - "sadCount": 9, - "angryCount": 27, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13844, - "name": "HuffPost Politics", - "handle": "HuffPostPolitics", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18838902_10155124699752911_6971495653588629046_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm5cko-OrQpOcPI-GqgP9V74INLYLzur0WIBNnrYmgNA33fLG0VMMxSWpg2i7235p0&_nc_ht=scontent.xx&oh=755100a2afdbaf29d5e08e613e66fc6e&oe=5DF42A6A", - "subscriberCount": 2107783, - "url": "https://www.facebook.com/56845382910", - "platform": "Facebook", - "platformId": "56845382910", - "verified": True, - }, - }, - { - "id": 70158000246, - "platformId": "15418366158_10157860918341159", - "platform": "Facebook", - "date": "2019-09-07 19:00:32", - "updated": "2019-09-08 00:44:19", - "type": "link", - "title": "Illinois Paid Millions in Medicaid for People Who Are Already Dead", - "caption": "pjmedia.com", - "description": 'The proof is in the pudding, as they say. In other words, if you want to see the "competency" and "efficiency" of leftists at work, look to Illinois. 
Among a myriad of state-wide problems created by leftist...', - "message": "Illinois Paid Millions in Medicaid for People Who Are Already Dead", - "expandedLinks": [ - { - "original": "https://pjmedia.com/trending/illinois-paid-millions-in-medicaid-for-people-who-are-already-dead/", - "expanded": "https://pjmedia.com/trending/illinois-paid-millions-in-medicaid-for-people-who-are-already-dead/", - } - ], - "link": "https://pjmedia.com/trending/illinois-paid-millions-in-medicaid-for-people-who-are-already-dead/", - "postUrl": "https://www.facebook.com/PJMedia/posts/10157860918341159", - "subscriberCount": 345146, - "score": 9.321951219512195, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDFW_Wc6ALaK2iY&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73fbbbf7581602fd2d8373%2Fe0030b822866ea4dc215e239a15d2d0102103635_bb06c1fec8f7ae6f502565ee1d84966ccc5c6a5d_facebook&cfs=1&_nc_hash=AQC7X9-liB_eXQPl", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQA2yg1_Reu1pkV_&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73fbbbf7581602fd2d8373%2Fe0030b822866ea4dc215e239a15d2d0102103635_bb06c1fec8f7ae6f502565ee1d84966ccc5c6a5d_facebook&_nc_hash=AQD4tH4FmRBoAZse", - } - ], - "statistics": { - "actual": { - "likeCount": 136, - "shareCount": 1006, - "commentCount": 152, - "loveCount": 1, - "wowCount": 140, - "hahaCount": 28, - "sadCount": 30, - "angryCount": 418, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 40, - "shareCount": 48, - "commentCount": 30, - "loveCount": 4, - "wowCount": 8, - "hahaCount": 27, - "sadCount": 5, - "angryCount": 43, - "thankfulCount": 0, - }, - }, - "account": { - "id": 546413, - "name": "PJ Media", - "handle": "PJMedia", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/11233498_10153918103746159_4425260475851381266_n.jpg?_nc_cat=1&_nc_oc=AQlsQcaTBN0IYmuAz9KhN7jR3MPlfGRQ6pQx6vtSV9AWa6eNztotI3-NTLX1xGzJ6zE&_nc_ht=scontent.xx&oh=15f625aebc03c1c0e428efec7e19fab3&oe=5E04568A", - "subscriberCount": 345163, - "url": "https://www.facebook.com/15418366158", - "platform": "Facebook", - "platformId": "15418366158", - "verified": True, - }, - }, - { - "id": 70162497981, - "platformId": "34407447433_10156144196982434", - "platform": "Facebook", - "date": "2019-09-07 20:34:39", - "updated": "2019-09-08 00:29:54", - "type": "link", - "title": "Roman Polanski, Convicted Child Rapist, Has Won Yet Another Prize", - "caption": "jezebel.com", - "description": "Roman Polanski, who pled guilty to unlawful sexual intercourse with a minor in 1977 and has yet to serve a single day of his sentence, has been awarded the Grand Jury Prize at the Venice International Film Festival for a film about a wrongfully accused man.\r\n", - "message": "Polanski has been persecuted with more than 20 awards since pleading guilty to child rape in 1978.", - "expandedLinks": [ - { - "original": "https://trib.al/HKSWKHH", - "expanded": "https://jezebel.com/roman-polanski-convicted-child-rapist-has-won-yet-ano-1837956995?rev=1567888256061&utm_medium=socialflow&utm_source=jezebel_facebook&utm_campaign=socialflow_jezebel_facebook", - } - ], - "link": "https://trib.al/HKSWKHH", - "postUrl": "https://www.facebook.com/Jezebel/posts/10156144196982434", - "subscriberCount": 815764, - "score": 8.741379310344827, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQCw_aA4LmkJCTiz&w=720&h=720&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--t-Q8raQV--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fb7pworkdtxwfa4o2unar.jpg&cfs=1&_nc_hash=AQBzyRZiu5YH78R6", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAK3dExwA-1Y9kH&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--t-Q8raQV--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fb7pworkdtxwfa4o2unar.jpg&_nc_hash=AQAqMA4iHPKH2uV3", - } - ], - "statistics": { - "actual": { - "likeCount": 35, - "shareCount": 87, - "commentCount": 28, - "loveCount": 1, - "wowCount": 12, - "hahaCount": 2, - "sadCount": 9, - "angryCount": 333, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 14, - "shareCount": 6, - "commentCount": 7, - "loveCount": 7, - "wowCount": 4, - "hahaCount": 5, - "sadCount": 3, - "angryCount": 12, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6753, - "name": "Jezebel", - "handle": "Jezebel", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10632833_10152652146387434_9205889665163163075_n.png?_nc_cat=1&_nc_oc=AQmE3moAD_e4DDH0Qk-IkkzGJ36IXDD-O29mmUauemxpi5JLbY-oMjPMeCglmwSb0Rs&_nc_ht=scontent.xx&oh=d7afae2d39ef36c76291f53c416d6c76&oe=5E0F900A", - "subscriberCount": 815764, - "url": "https://www.facebook.com/34407447433", - "platform": "Facebook", - "platformId": "34407447433", - "verified": True, - }, - }, - { - "id": 70166555948, - "platformId": "513813158657249_2597694686935742", - "platform": "Facebook", - "date": "2019-09-07 22:00:19", - "updated": "2019-09-08 00:48:01", - "type": "link", - "title": "Westboro Baptist Church Riots At Marine’s Funeral, Gets Greeted By Wall Of Bikers", - "caption": "taphaps.com", - "description": "The Westboro Baptist Church decided to protest a Marine's funeral. When word of this leaked out, a group of unexpected guests decided to show up and teach them a lesson.", - "message": "The Westboro Baptist Church decided to protest a Marine’s funeral. 
When word of this leaked out, a group of unexpected guests decided to show up and teach them a lesson.", - "expandedLinks": [ - { - "original": "http://ow.ly/RSR930punma", - "expanded": "https://taphaps.com/westboro-marine-richard-bennett/", - } - ], - "link": "http://ow.ly/RSR930punma", - "postUrl": "https://www.facebook.com/MadWorldNewsCorp/posts/2597694686935742", - "subscriberCount": 2133967, - "score": 8.69260700389105, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBm9Rm06lWlbfsb&w=720&h=720&url=https%3A%2F%2Fi2.wp.com%2Ftaphaps.com%2Fwp-content%2Fuploads%2F2018%2F06%2Fcomposite_15278619383366.jpg%3Ffit%3D800%252C420%26ssl%3D1&cfs=1&_nc_hash=AQCM6dsvk8D4QlT7", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCICcsFt2wd20Iu&url=https%3A%2F%2Fi2.wp.com%2Ftaphaps.com%2Fwp-content%2Fuploads%2F2018%2F06%2Fcomposite_15278619383366.jpg%3Ffit%3D800%252C420%26ssl%3D1&_nc_hash=AQB2ggt2TiOntrL5", - } - ], - "statistics": { - "actual": { - "likeCount": 955, - "shareCount": 426, - "commentCount": 483, - "loveCount": 185, - "wowCount": 14, - "hahaCount": 5, - "sadCount": 94, - "angryCount": 72, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 65, - "shareCount": 46, - "commentCount": 63, - "loveCount": 8, - "wowCount": 6, - "hahaCount": 12, - "sadCount": 7, - "angryCount": 50, - "thankfulCount": 0, - }, - }, - "account": { - "id": 279876, - "name": "Mad World News", - "handle": "MadWorldNewsCorp", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16649435_1331399193565304_7598140519586777175_n.png?_nc_cat=1&_nc_oc=AQkFf7jm82V9pnSg1x0Pqt0rlA2Yl-XqrdIF4h-iVA0BzRc8fXvud27Fd5_bf3n4adY&_nc_ht=scontent.xx&oh=db1ac67cb2a4dc589f0e879b97477ebd&oe=5E151F16", - "subscriberCount": 2135169, - "url": "https://www.facebook.com/513813158657249", - "platform": "Facebook", - "platformId": "513813158657249", - "verified": False, - }, - }, - { - "id": 70175854511, - "platformId": "182919686769_10156515368256770", - "platform": "Facebook", - "date": "2019-09-08 00:30:08", - "updated": "2019-09-08 00:40:44", - "type": "link", - "title": "Trump Using Military Funds To Build 175 Miles Of Border Wall", - "caption": "dailycaller.com", - "description": "'Slap in the face'", - "message": "🇺🇸 🇺🇸 🇺🇸", - "expandedLinks": [ - { - "original": "https://dailycaller.com/2019/09/04/trump-building-174-miles-border-wall/", - "expanded": "https://dailycaller.com/2019/09/04/trump-building-174-miles-border-wall/", - } - ], - "link": "https://dailycaller.com/2019/09/04/trump-building-174-miles-border-wall/", - "postUrl": "https://www.facebook.com/DailyCaller/posts/10156515368256770", - "subscriberCount": 5408115, - "score": 8.655913978494624, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAHAXubROblxiW2&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f17dbae9d20fe10df2a3%2F8c0635ef178086cb0d59e6adcbee7cbcb0b56194_7e72428da38eb398449cb2531f05fc7b5a13068d_facebook&cfs=1&_nc_hash=AQD9iqh3FO7_aK8L", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAsKlewCzcRYkrb&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f17dbae9d20fe10df2a3%2F8c0635ef178086cb0d59e6adcbee7cbcb0b56194_7e72428da38eb398449cb2531f05fc7b5a13068d_facebook&_nc_hash=AQC8ksYE43suLecj", - } - ], - "statistics": { - "actual": { - "likeCount": 551, - "shareCount": 29, - "commentCount": 129, - "loveCount": 88, - "wowCount": 1, - 
"hahaCount": 3, - "sadCount": 1, - "angryCount": 3, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 18, - "shareCount": 11, - "commentCount": 24, - "loveCount": 4, - "wowCount": 4, - "hahaCount": 17, - "sadCount": 3, - "angryCount": 12, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13489, - "name": "The Daily Caller", - "handle": "DailyCaller", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471", - "subscriberCount": 5408115, - "url": "https://www.facebook.com/182919686769", - "platform": "Facebook", - "platformId": "182919686769", - "verified": True, - }, - }, - { - "id": 70175812390, - "platformId": "354522044588660_3474487762592057", - "platform": "Facebook", - "date": "2019-09-08 00:00:32", - "updated": "2019-09-08 00:39:40", - "type": "link", - "title": "Cosplayer's lightsaber attachment for her amputated arm attracts praise from Mark Hamill", - "caption": "megaphone.upworthy.com", - "description": "Cosplayer Angel Giuffria and her best friend Trace Wilson created a cool look for San Diego Comic-Con 2019.", - "message": "Awesome!", - "expandedLinks": [ - { - "original": "https://buff.ly/34tgXti", - "expanded": "https://megaphone.upworthy.com/p/cosplayers-lightsaber-attachment-for-her-amputated-arm", - } - ], - "link": "https://buff.ly/34tgXti", - "postUrl": "https://www.facebook.com/Upworthy/posts/3474487762592057", - "subscriberCount": 11752205, - "score": 8.537234042553191, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBPqzN4S_bvpl0m&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d72d2eb595ead5af308f065%2F74ff4746953c774d7e0792890f3f767e79cdcbf8_6a4d91f27330f64461e65fc85106d014d73330f0_facebook&cfs=1&_nc_hash=AQDZ9p6ULaB7D16t", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAGvmrFVvDV0uv2&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d72d2eb595ead5af308f065%2F74ff4746953c774d7e0792890f3f767e79cdcbf8_6a4d91f27330f64461e65fc85106d014d73330f0_facebook&_nc_hash=AQBfrzkG0_ylPd8H", - } - ], - "statistics": { - "actual": { - "likeCount": 1018, - "shareCount": 210, - "commentCount": 37, - "loveCount": 296, - "wowCount": 43, - "hahaCount": 1, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 89, - "shareCount": 36, - "commentCount": 14, - "loveCount": 23, - "wowCount": 6, - "hahaCount": 9, - "sadCount": 6, - "angryCount": 5, - "thankfulCount": 0, - }, - }, - "account": { - "id": 3919, - "name": "Upworthy", - "handle": "Upworthy", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1914363_1176320005742189_4709951186905632219_n.png?_nc_cat=1&_nc_oc=AQlPiX5mYxZC_Xj8_M4a7JZZvCD27izvAXTMtobXrLjwA4S5Pel-CsMh5GMouHt8LNg&_nc_ht=scontent.xx&oh=ba4e0db7c2521356dc17108d8aa4a12a&oe=5E04D944", - "subscriberCount": 11752205, - "url": "https://www.facebook.com/354522044588660", - "platform": "Facebook", - "platformId": "354522044588660", - "verified": True, - }, - }, - { - "id": 70162842971, - "platformId": "223649167822693_1260339267487006", - "platform": "Facebook", - "date": "2019-09-07 20:50:47", - "updated": "2019-09-08 00:44:36", - "type": "link", - "title": "Shia LaBeouf plays his own father in Honey Boy. 
He’s phenomenal.", - "caption": "vox.com", - "description": "LaBeouf wrote the film based on his own troubled childhood. It’s an exercise in extreme empathy, and a must-see.", - "message": "LaBeouf wrote the film based on his own troubled childhood. It’s an exercise in extreme empathy, and a must-see.", - "expandedLinks": [ - { - "original": "https://www.vox.com/culture/2019/9/7/20852678/honey-boy-review-shia-labeouf-tiff", - "expanded": "https://www.vox.com/culture/2019/9/7/20852678/honey-boy-review-shia-labeouf-tiff", - } - ], - "link": "https://www.vox.com/culture/2019/9/7/20852678/honey-boy-review-shia-labeouf-tiff", - "postUrl": "https://www.facebook.com/Vox/posts/1260339267487006", - "subscriberCount": 2426078, - "score": 8.523364485981308, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQC_nbiaF_fbmuK3&w=600&h=600&url=https%3A%2F%2Fcdn.vox-cdn.com%2Fthumbor%2FlrldWWBAN80k5UXEbmOxanwUTec%3D%2F36x0%3A1182x600%2Ffit-in%2F1200x630%2Fcdn.vox-cdn.com%2Fuploads%2Fchorus_asset%2Ffile%2F19175438%2Fhoneyboy.jpg&cfs=1&sx=260&sy=0&sw=600&sh=600&_nc_hash=AQAKI-eapCdMDK4s", - "height": 600, - "width": 600, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBx2RBl9c23yfdg&url=https%3A%2F%2Fcdn.vox-cdn.com%2Fthumbor%2FlrldWWBAN80k5UXEbmOxanwUTec%3D%2F36x0%3A1182x600%2Ffit-in%2F1200x630%2Fcdn.vox-cdn.com%2Fuploads%2Fchorus_asset%2Ffile%2F19175438%2Fhoneyboy.jpg&_nc_hash=AQDyxNebdOSSGnVR", - } - ], - "statistics": { - "actual": { - "likeCount": 670, - "shareCount": 75, - "commentCount": 68, - "loveCount": 55, - "wowCount": 40, - "hahaCount": 4, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 47, - "shareCount": 12, - "commentCount": 14, - "loveCount": 6, - "wowCount": 5, - "hahaCount": 7, - "sadCount": 5, - "angryCount": 11, - "thankfulCount": 0, - }, - }, - "account": { - "id": 44528, - "name": "Vox", - "handle": "Vox", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15327441_612869972233942_727410529402189533_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnoAo-srh87mkvD-DKqEDzFi4nn14JVBUE8HPqgKgoKz2LtUzKnd7p6NTRpO6WA_Gg&_nc_ht=scontent.xx&oh=ffdab33a30a7adbfde40574c198f8580&oe=5DF8E26D", - "subscriberCount": 2426279, - "url": "https://www.facebook.com/223649167822693", - "platform": "Facebook", - "platformId": "223649167822693", - "verified": True, - }, - }, - { - "id": 70168125844, - "platformId": "86680728811_10158783743663812", - "platform": "Facebook", - "date": "2019-09-07 22:23:31", - "updated": "2019-09-08 00:45:25", - "type": "link", - "title": "NOAA issues statement supporting Trump's claim Hurricane Dorian threatened Alabama", - "caption": "abcnews.go.com", - "description": " ", - "message": "Five days after Donald Trump said Hurricane Dorian threatened Alabama, the National Oceanic and Atmospheric Association issued a statement late Friday supporting the president's claim and chastising a local branch of the National Weather Service.", - "expandedLinks": [ - { - "original": "https://abcn.ws/2LxdWiS", - "expanded": "https://abcnews.go.com/Politics/noaa-issues-statement-supporting-trumps-claim-hurricane-dorian/story?id=65442468&cid=social_fb_abcn", - } - ], - "link": "https://abcn.ws/2LxdWiS", - "postUrl": "https://www.facebook.com/ABCNews/posts/10158783743663812", - "subscriberCount": 14195962, - "score": 8.483180428134556, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQCTUnaDJll_UUFU&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FInternational%2Fpresident-trump-dorian-map-ap-jef-190904_hpMain_16x9_992.jpg&cfs=1&sx=434&sy=0&sw=558&sh=558&_nc_hash=AQDMQUlXwjLGxsTb", - "height": 558, - "width": 558, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDSxXQ9JfnoeN5B&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FInternational%2Fpresident-trump-dorian-map-ap-jef-190904_hpMain_16x9_992.jpg&_nc_hash=AQB3C2q6MPbeZXRB", - } - ], - "statistics": { - "actual": { - "likeCount": 494, - "shareCount": 298, - "commentCount": 657, - "loveCount": 33, - "wowCount": 56, - "hahaCount": 624, - "sadCount": 40, - "angryCount": 572, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 133, - "shareCount": 63, - "commentCount": 57, - "loveCount": 16, - "wowCount": 17, - "hahaCount": 15, - "sadCount": 16, - "angryCount": 10, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - }, - { - "id": 70172118628, - "platformId": "532854420074062_3748578948501577", - "platform": "Facebook", - "date": "2019-09-07 23:30:30", - "updated": "2019-09-08 00:43:19", - "type": "native_video", - "description": "Who else drank from the garden hose? 🙋\u200d♀️", - "expandedLinks": [ - { - "original": "https://www.facebook.com/thefavesusa/videos/2423928947852297/", - "expanded": "https://www.facebook.com/thefavesusa/videos/2423928947852297/", - } - ], - "link": "https://www.facebook.com/thefavesusa/videos/2423928947852297/", - "postUrl": "https://www.facebook.com/thefavesusa/posts/3748578948501577", - "subscriberCount": 6323442, - "score": 8.475409836065573, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/69386221_369404493728708_8333827521795588096_n.mp4?_nc_cat=106&efg=eyJybHIiOjMwMCwicmxhIjo1MTIsInZlbmNvZGVfdGFnIjoic3ZlX3NkIn0%3D&_nc_oc=AQl8yuqPhwpOYp-3k3lEh631dxvd6HErFyyQQRm5pf026ER0QfOkyIbwHk-bSMmFFJQ&rl=300&vabr=119&_nc_ht=video.xx&oh=1a7e8cda88710bfd6bfa5efe20d5ed37&oe=5D76A62C", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.13418-10/67083314_676082179563971_8068822950096142336_n.jpeg?_nc_cat=111&_nc_oc=AQnEkM29YazhIjlCJEMc16sm3Z-ucihUUjivj0tSs18hFWDWU2hWt2seWIZKG2u7gKE&_nc_ht=scontent.xx&oh=8bb846be002c2d37985d08c40ca21b00&oe=5E0C009F", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.13418-10/67083314_676082179563971_8068822950096142336_n.jpeg?_nc_cat=111&_nc_oc=AQnEkM29YazhIjlCJEMc16sm3Z-ucihUUjivj0tSs18hFWDWU2hWt2seWIZKG2u7gKE&_nc_ht=scontent.xx&oh=8bb846be002c2d37985d08c40ca21b00&oe=5E0C009F", - }, - ], - "statistics": { - "actual": { - "likeCount": 2766, - "shareCount": 1474, - "commentCount": 163, - "loveCount": 116, - "wowCount": 5, - "hahaCount": 129, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 247, - "shareCount": 185, - "commentCount": 47, - "loveCount": 25, - "wowCount": 6, - "hahaCount": 33, - "sadCount": 4, - "angryCount": 2, - "thankfulCount": 0, - }, 
- }, - "account": { - "id": 48728, - "name": "Faves USA", - "handle": "thefavesusa", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A", - "subscriberCount": 6323373, - "url": "https://www.facebook.com/532854420074062", - "platform": "Facebook", - "platformId": "532854420074062", - "verified": True, - }, - "videoLengthMS": 19966, - }, - { - "id": 70161279172, - "platformId": "210277954204_494897387960275", - "platform": "Facebook", - "date": "2019-09-07 20:07:59", - "updated": "2019-09-08 00:39:40", - "type": "native_video", - "message": 'A Portland Jury clears pair accused of beating up a Trump supporter for "wearing a MAGA hat." Turns out he was trying to start a fight.', - "expandedLinks": [ - { - "original": "https://www.facebook.com/TheYoungTurks/videos/494897387960275/", - "expanded": "https://www.facebook.com/TheYoungTurks/videos/494897387960275/", - } - ], - "link": "https://www.facebook.com/TheYoungTurks/videos/494897387960275/", - "postUrl": "https://www.facebook.com/TheYoungTurks/posts/494897387960275", - "subscriberCount": 2099948, - "score": 8.434615384615384, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/70666582_532317314181156_4242284247894720512_n.mp4?_nc_cat=108&efg=eyJybHIiOjMwMCwicmxhIjo1OTksInZlbmNvZGVfdGFnIjoic3ZlX3NkIn0%3D&_nc_oc=AQkidjZHM0f2cKgYgy1J2IaIUNTobts-mBT-vH1MUORQ2TAVP1-t_ndykwSDLNaUdSo&rl=300&vabr=141&_nc_ht=video.xx&oh=5235f9aecbb3ac96f96cfeb798560bac&oe=5D76BA2A", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/69260973_631601874030315_3567498504341291008_n.jpg?_nc_cat=102&_nc_oc=AQnzNPRYkA0-eTOU0Omn1s1XaHepBwKgs6WGDui_Wh0CleypMz3UDQd7TX6v9RvfDAM&_nc_ht=scontent.xx&oh=95988a6fca08311254e15dc697a8dbf2&oe=5E10901F", - "height": 360, - "width": 640, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/69260973_631601874030315_3567498504341291008_n.jpg?_nc_cat=102&_nc_oc=AQnzNPRYkA0-eTOU0Omn1s1XaHepBwKgs6WGDui_Wh0CleypMz3UDQd7TX6v9RvfDAM&_nc_ht=scontent.xx&oh=95988a6fca08311254e15dc697a8dbf2&oe=5E10901F", - }, - ], - "statistics": { - "actual": { - "likeCount": 969, - "shareCount": 475, - "commentCount": 186, - "loveCount": 155, - "wowCount": 25, - "hahaCount": 368, - "sadCount": 8, - "angryCount": 7, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 106, - "shareCount": 51, - "commentCount": 41, - "loveCount": 14, - "wowCount": 8, - "hahaCount": 21, - "sadCount": 6, - "angryCount": 13, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6786, - "name": "The Young Turks", - "handle": "TheYoungTurks", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5", - "subscriberCount": 2100186, - "url": "https://www.facebook.com/210277954204", - "platform": "Facebook", - "platformId": "210277954204", - "verified": True, - }, - "videoLengthMS": 226601, - }, - { - "id": 70157861820, - "platformId": "266790296879_10157610319761880", - "platform": "Facebook", - "date": "2019-09-07 19:02:02", - "updated": "2019-09-08 00:41:55", - "type": "link", - "title": "Opinion | Trump’s on a Path to a One-Term Presidency", - "caption": 
"bloomberg.com", - "description": "To get re-elected, he will need to truly end the trade war.", - "message": "Trump may be following in George H.W. Bush's footsteps without meaning to.", - "expandedLinks": [ - { - "original": "https://bloom.bg/2LDuYf4", - "expanded": "https://www.bloomberg.com/opinion/articles/2019-08-29/how-trump-is-like-george-h-w-bush?utm_content=business&utm_source=facebook&utm_campaign=socialflow-organic&cmpid=socialflow-facebook-business&utm_medium=social", - } - ], - "link": "https://bloom.bg/2LDuYf4", - "postUrl": "https://www.facebook.com/bloombergbusiness/posts/10157610319761880", - "subscriberCount": 2955474, - "score": 8.300970873786408, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBG6bcCSWbVI9VT&w=720&h=720&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3687-a50e4c223504f106b77d0e43d433a6e5.jpg&cfs=1&_nc_hash=AQDJXDhZ9O3u2mdA", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCB2w8esj-MwK9d&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3687-a50e4c223504f106b77d0e43d433a6e5.jpg&_nc_hash=AQABfg-6cmYGHxYr", - } - ], - "statistics": { - "actual": { - "likeCount": 330, - "shareCount": 105, - "commentCount": 209, - "loveCount": 104, - "wowCount": 3, - "hahaCount": 102, - "sadCount": 0, - "angryCount": 2, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 40, - "shareCount": 16, - "commentCount": 18, - "loveCount": 3, - "wowCount": 5, - "hahaCount": 10, - "sadCount": 3, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10343, - "name": "Bloomberg", - "handle": "bloombergbusiness", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31790536_10156383343951880_9143173959372505088_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm0CmNHVi4wKjfV2xKZ8WmMFbjVnwkn6rwlbqPewk5wTL0Plzu-cY8b0zLOAhS4DLw&_nc_ht=scontent.xx&oh=6eda22b5a7936ec78ea6929b3ed38430&oe=5E1356BD", - "subscriberCount": 2955809, - "url": "https://www.facebook.com/266790296879", - "platform": "Facebook", - "platformId": "266790296879", - "verified": True, - }, - }, - { - "id": 70164120670, - "platformId": "177486166274_10156706635101275", - "platform": "Facebook", - "date": "2019-09-07 21:05:00", - "updated": "2019-09-08 00:42:17", - "type": "link", - "title": "Mom shoots 'intruder' who turned out to be her daughter surprising her from college", - "caption": "yahoo.com", - "description": "Late Friday night, the 18-year-old, along with her boyfriend, entered her mother's house in an attempt to surprise her. Her mother thought it was an intruder entering her home.", - "message": "(W) Home of the brave.... When her daughter opened her mom’s bedroom door, the mother fired a single shot from a .38 special, striking her daughter's elbow. 
It wasn't until the mom fired the gun that she realized it was her daughter.", - "expandedLinks": [ - { - "original": "https://www.yahoo.com/lifestyle/mom-shoots-intruder-who-turned-out-to-be-her-daughter-surprising-her-from-college-225514706.html", - "expanded": "https://www.yahoo.com/lifestyle/mom-shoots-intruder-who-turned-out-to-be-her-daughter-surprising-her-from-college-225514706.html", - } - ], - "link": "https://www.yahoo.com/lifestyle/mom-shoots-intruder-who-turned-out-to-be-her-daughter-surprising-her-from-college-225514706.html", - "postUrl": "https://www.facebook.com/beingliberal.org/posts/10156706635101275", - "subscriberCount": 1693698, - "score": 8.274353876739562, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDOTTId7tCQIL7n&w=720&h=720&url=https%3A%2F%2Fs.yimg.com%2Fuu%2Fapi%2Fres%2F1.2%2FHQfXkS3QwU_I_QWKNwy6tQ--%7EB%2FaD0xMDgwO3c9MTkyMDtzbT0xO2FwcGlkPXl0YWNoeW9u%2Fhttp%3A%2F%2Fmedia.zenfs.com%2Fen-US%2Fvideo%2Fwjw_cleveland_686%2Fcdb39c35e380bad83d6543cf2498bdde&cfs=1&_nc_hash=AQAxnw1IYaZgszl1", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCKeO31cDOTHyer&url=https%3A%2F%2Fs.yimg.com%2Fuu%2Fapi%2Fres%2F1.2%2FHQfXkS3QwU_I_QWKNwy6tQ--%7EB%2FaD0xMDgwO3c9MTkyMDtzbT0xO2FwcGlkPXl0YWNoeW9u%2Fhttp%3A%2F%2Fmedia.zenfs.com%2Fen-US%2Fvideo%2Fwjw_cleveland_686%2Fcdb39c35e380bad83d6543cf2498bdde&_nc_hash=AQD4QM-W36TBTH1w", - } - ], - "statistics": { - "actual": { - "likeCount": 260, - "shareCount": 1341, - "commentCount": 528, - "loveCount": 5, - "wowCount": 489, - "hahaCount": 115, - "sadCount": 892, - "angryCount": 532, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 151, - "shareCount": 137, - "commentCount": 83, - "loveCount": 18, - "wowCount": 19, - "hahaCount": 34, - "sadCount": 11, - "angryCount": 50, - "thankfulCount": 0, - }, - }, - "account": { - "id": 5860, - "name": "Being Liberal", - "handle": "beingliberal.org", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16865169_10154418564961275_3050958479071030073_n.png?_nc_cat=1&_nc_oc=AQlPF5wIrIXWCeRBPDA5P17NqQMaux6LCm9Ak8V6ktaSHP0ajoY7MreFOF-RleH_5sQ&_nc_ht=scontent.xx&oh=39015e43af0ae9881035d6aa4a9fe5fc&oe=5E0A093D", - "subscriberCount": 1693705, - "url": "https://www.facebook.com/177486166274", - "platform": "Facebook", - "platformId": "177486166274", - "verified": True, - }, - }, - { - "id": 70164191280, - "platformId": "7292655492_396666331043489", - "platform": "Facebook", - "date": "2019-09-07 21:08:45", - "updated": "2019-09-08 00:43:45", - "type": "live_video_complete", - "message": "Hear the plan to proceed with the impeachment inquiry against Donald Trump! Rep. Pramila Jayapal and Rep. Jamie Raskin share their blueprint to hold Trump accountable for his crimes as the House Judiciary Committee begins impeachment proceedings. 
(via act.tv)", - "expandedLinks": [ - { - "original": "https://www.facebook.com/moveon/videos/396666331043489/", - "expanded": "https://www.facebook.com/moveon/videos/396666331043489/", - } - ], - "link": "https://www.facebook.com/moveon/videos/396666331043489/", - "postUrl": "https://www.facebook.com/moveon/posts/396666331043489", - "subscriberCount": 1654145, - "score": 8.243869209809265, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.1790-2/10000000_384414495807916_8369269378861398547_n.mp4?_nc_cat=109&vs=b1c91073f726db69&_nc_vs=HBksFQAYJEdJQ1dtQUNzR1hoNW4xMEJBQk42bG1Rb25pVjBidjRHQUFBRhUAABUAGCRHSUNXbUFBb0dwZGxIRGdDQUxaOTVyM0FRY1k4YnY0R0FBQUYVAgAoRC1pICclcycgLWZiX3VzZV90ZmR0X3N0YXJ0dGltZSAxIC1pICclcycgLWMgY29weSAtbW92ZmxhZ3MgZmFzdHN0YXJ0KwGIEnByb2dyZXNzaXZlX3JlY2lwZQExFQAlABwAABgKMTAzNTc0NTI3NxbEgMiygrG0ARXGDxkFGAJDMxgDYXYxHBdAu7PybpeNUBgZZGFzaF9saXZlX21kX2ZyYWdfMl92aWRlbxIAGBh2aWRlb3MudnRzLmNhbGxiYWNrLnByb2QZHBUAFYqfAwAoElZJREVPX1ZJRVdfUkVRVUVTVBsGiBVvZW1fdGFyZ2V0X2VuY29kZV90YWcGb2VwX3NkE29lbV9yZXF1ZXN0X3RpbWVfbXMNMTU2Nzg5NzY4Njc1NAxvZW1fY2ZnX3J1bGUSd2FzbGl2ZV9zZF90cmltbWVkDG9lbV92aWRlb19pZA8zOTY2NzAxNDQzNzY0NDESb2VtX3ZpZGVvX2Fzc2V0X2lkDzM5NjY2NjMyNDM3NjgyMxVvZW1fdmlkZW9fcmVzb3VyY2VfaWQPMzk2NjY2MzIxMDQzNDkwJQQcHBwVgLUYGwFVAJKVARsBVQD06gEcFQIAAAAAAA%3D%3D&efg=eyJ2ZW5jb2RlX3RhZyI6Im9lcF9zZCJ9&_nc_log=1&_nc_oc=AQn6cMan5-t8T_lfzD75KzZyHqeMamHDTQHAvb_GPdVKeVB-V0EcB49vddi0kVoi78c&_nc_ht=video.xx&oh=f59a23e608d9d22929afc11c1da23757&oe=5D76966C&_nc_rid=efa046a5126147e", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/s720x720/68874404_396668534376602_4308557968388915200_n.jpg?_nc_cat=106&_nc_log=1&_nc_oc=AQmMVLY5zqQURDB2TKO1zooHpKwvzND4lK84W6kB-Rtq8iKO3maR8xP9pLHxIUZSD00&_nc_ht=scontent.xx&oh=b212bce3c0fb77ad0529d301f5e8c6fb&oe=5E1167E5", - "height": 405, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/68874404_396668534376602_4308557968388915200_n.jpg?_nc_cat=106&_nc_oc=AQmMVLY5zqQURDB2TKO1zooHpKwvzND4lK84W6kB-Rtq8iKO3maR8xP9pLHxIUZSD00&_nc_ht=scontent.xx&oh=7170dece3f1407f19873cf1dbee215b5&oe=5E14EAAD", - }, - ], - "statistics": { - "actual": { - "likeCount": 1026, - "shareCount": 789, - "commentCount": 3268, - "loveCount": 566, - "wowCount": 17, - "hahaCount": 195, - "sadCount": 10, - "angryCount": 180, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 289, - "shareCount": 119, - "commentCount": 194, - "loveCount": 105, - "wowCount": 4, - "hahaCount": 6, - "sadCount": 9, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 3832, - "name": "MoveOn", - "handle": "moveon", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31206661_10155375246245493_295037061581229251_n.png?_nc_cat=1&_nc_oc=AQlSE1FqdCbaeopNV1yaNtJ3CmFLidqKES5CzQDuKERpCBGKUk_e3fO242Wi3KvNKSE&_nc_ht=scontent.xx&oh=ca9e5b7aef01fe823dc1929cfd53827d&oe=5E10EEAD", - "subscriberCount": 1654130, - "url": "https://www.facebook.com/7292655492", - "platform": "Facebook", - "platformId": "7292655492", - "verified": True, - }, - "videoLengthMS": 7091947, - "liveVideoStatus": "completed", - }, - { - "id": 70174258568, - "platformId": "20446254070_10156890840439071", - "platform": "Facebook", - "date": "2019-09-08 00:00:29", - "updated": "2019-09-08 00:42:26", - "type": "link", - "title": "Trump says he invited Taliban leaders to Camp David for a secret meeting, but canceled because of a recent attack that killed a US soldier", - "caption": "businessinsider.com", - "description": 
'"The major Taliban leaders and, separately, the President of Afghanistan, were going to secretly meet with me at Camp David on Sunday," Trump tweeted.', - "message": "Trump said he's also called off all peace negotiations with the Taliban.", - "expandedLinks": [ - { - "original": "https://bit.ly/2PXgMDl", - "expanded": "https://www.businessinsider.com/trump-canceled-secret-camp-david-meeting-with-taliban-leaders-2019-9?utm_content=buffer324e2&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer-bi", - } - ], - "link": "https://bit.ly/2PXgMDl", - "postUrl": "https://www.facebook.com/businessinsider/posts/10156890840439071", - "subscriberCount": 9107012, - "score": 8.193548387096774, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAuUoFSrIOPEUbT&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d7443d4665c02241436f629%2Fc9e2a350ed76b667887bd8bc901046ef19df8a7d_6480a23340bd0fff02ebb6fa6d654bc5bee2c290_facebook&cfs=1&_nc_hash=AQC1rMqJsyRwHoJG", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBkk8ywT_dgoUXY&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d7443d4665c02241436f629%2Fc9e2a350ed76b667887bd8bc901046ef19df8a7d_6480a23340bd0fff02ebb6fa6d654bc5bee2c290_facebook&_nc_hash=AQClYoSFI8eIT1VN", - } - ], - "statistics": { - "actual": { - "likeCount": 35, - "shareCount": 26, - "commentCount": 96, - "loveCount": 1, - "wowCount": 9, - "hahaCount": 60, - "sadCount": 1, - "angryCount": 26, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 9, - "shareCount": 4, - "commentCount": 4, - "loveCount": 2, - "wowCount": 2, - "hahaCount": 4, - "sadCount": 3, - "angryCount": 3, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6648, - "name": "Business Insider", - "handle": "businessinsider", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9", - "subscriberCount": 9107575, - "url": "https://www.facebook.com/20446254070", - "platform": "Facebook", - "platformId": "20446254070", - "verified": True, - }, - }, - { - "id": 70157398315, - "platformId": "155869377766434_3572569126096425", - "platform": "Facebook", - "date": "2019-09-07 18:52:08", - "updated": "2019-09-07 23:00:35", - "type": "link", - "title": "Thousands listed as missing in Bahamas in Hurricane Dorian’s wake", - "caption": "nbcnews.com", - "description": " ", - "message": "Thousands of people are desperately trying to find loved ones in the Bahamas.", - "expandedLinks": [ - { - "original": "https://nbcnews.to/34xqqzs", - "expanded": "https://www.nbcnews.com/news/world/thousands-listed-missing-bahamas-hurricane-dorian-s-wake-n1050791?cid=sm_npd_nn_fb_ma", - } - ], - "link": "https://nbcnews.to/34xqqzs", - "postUrl": "https://www.facebook.com/NBCNews/posts/3572569126096425", - "subscriberCount": 9970622, - "score": 8.153543307086615, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQD6stbq-rFP7Jda&w=720&h=720&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&cfs=1&_nc_hash=AQBVY5Go-4zF-tlS", - "height": 720, - "width": 720, - "full": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQCocMqpL-yoqFsO&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&_nc_hash=AQDssZadqERvIDEf", - } - ], - "statistics": { - "actual": { - "likeCount": 127, - "shareCount": 567, - "commentCount": 95, - "loveCount": 0, - "wowCount": 62, - "hahaCount": 1, - "sadCount": 1214, - "angryCount": 5, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 61, - "shareCount": 50, - "commentCount": 54, - "loveCount": 10, - "wowCount": 19, - "hahaCount": 19, - "sadCount": 21, - "angryCount": 20, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13889, - "name": "NBC News", - "handle": "NBCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653", - "subscriberCount": 9970540, - "url": "https://www.facebook.com/155869377766434", - "platform": "Facebook", - "platformId": "155869377766434", - "verified": True, - }, - }, - { - "id": 70172968858, - "platformId": "1830665590513511_2501693993410664", - "platform": "Facebook", - "date": "2019-09-07 23:35:59", - "updated": "2019-09-08 00:41:20", - "type": "link", - "title": "Trump says he's called off secret Taliban meeting at Camp David over Afghanistan bombing", - "caption": "axios.com", - "description": 'They were "trying to build false leverage" with the bombing, he tweeted.', - "message": 'President Trump tweets: "Unbeknownst to almost everyone, the major Taliban leaders and, separately, the President of Afghanistan, were going to secretly meet with me at Camp David on Sunday. They were coming to the United States tonight. Unfortunately, in order to build false leverage, they admitted to an attack in Kabul that killed one of our great great soldiers, and 11 other people. 
I immediately cancelled the meeting and called off peace negotiations."', - "expandedLinks": [ - { - "original": "https://www.axios.com/kabul-bombing-trump-tweets-he-called-off-taliban-talks-73d1b998-375c-4e09-9a4c-6c2355bd2019.html?utm_source=facebook&utm_medium=social&utm_campaign=onhrs", - "expanded": "https://www.axios.com/kabul-bombing-trump-tweets-he-called-off-taliban-talks-73d1b998-375c-4e09-9a4c-6c2355bd2019.html?utm_source=facebook&utm_medium=social&utm_campaign=onhrs", - } - ], - "link": "https://www.axios.com/kabul-bombing-trump-tweets-he-called-off-taliban-talks-73d1b998-375c-4e09-9a4c-6c2355bd2019.html?utm_source=facebook&utm_medium=social&utm_campaign=onhrs", - "postUrl": "https://www.facebook.com/axiosnews/posts/2501693993410664", - "subscriberCount": 339339, - "score": 8.062176165803109, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDkia2HrmsaC2WF&w=720&h=720&url=https%3A%2F%2Fimages.axios.com%2FmVrpxqNy0PvpzRVMzYcFpeqxy8s%3D%2F0x0%3A5871x3302%2F1920x1080%2F2019%2F09%2F07%2F1567898519236.jpg&cfs=1&sx=505&sy=0&sw=1080&sh=1080&_nc_hash=AQCciPKG-QM_Xd3G", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCtLVStevGG0CNY&url=https%3A%2F%2Fimages.axios.com%2FmVrpxqNy0PvpzRVMzYcFpeqxy8s%3D%2F0x0%3A5871x3302%2F1920x1080%2F2019%2F09%2F07%2F1567898519236.jpg&_nc_hash=AQAOf6U399snb_jC", - } - ], - "statistics": { - "actual": { - "likeCount": 133, - "shareCount": 276, - "commentCount": 521, - "loveCount": 10, - "wowCount": 60, - "hahaCount": 399, - "sadCount": 7, - "angryCount": 150, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 61, - "shareCount": 38, - "commentCount": 32, - "loveCount": 9, - "wowCount": 8, - "hahaCount": 15, - "sadCount": 10, - "angryCount": 20, - "thankfulCount": 0, - }, - }, - "account": { - "id": 1431632, - "name": "Axios", - "handle": "axiosnews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46844445_2289864377926961_9207563348864925696_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQncZ-V-nWa7ihCtPUY2OE7NX8kzbdrK9hiEMhqNa6qBeOKkh3VKYYgS2lvKd-xjZnI&_nc_ht=scontent.xx&oh=3fa348414b7b9cfcabc2cd5bc93789f4&oe=5E0F6422", - "subscriberCount": 339358, - "url": "https://www.facebook.com/1830665590513511", - "platform": "Facebook", - "platformId": "1830665590513511", - "verified": True, - }, - }, - { - "id": 70159083274, - "platformId": "182919686769_10156514740791770", - "platform": "Facebook", - "date": "2019-09-07 19:30:03", - "updated": "2019-09-08 00:40:44", - "type": "link", - "title": "Bernie Sanders Trots Out Linda Sarsour As A Campaign Surrogate", - "caption": "dailycaller.com", - "description": "Sen. 
Bernie Sanders enlisted the services of far-left activist Linda Sarsour to campaign for him Tuesday.", - "message": "👀", - "expandedLinks": [ - { - "original": "https://dailycaller.com/2019/09/07/bernie-sanders-linda-sarsour-jewish-israel/", - "expanded": "https://dailycaller.com/2019/09/07/bernie-sanders-linda-sarsour-jewish-israel/", - } - ], - "link": "https://dailycaller.com/2019/09/07/bernie-sanders-linda-sarsour-jewish-israel/", - "postUrl": "https://www.facebook.com/DailyCaller/posts/10156514740791770", - "subscriberCount": 5408428, - "score": 7.962800875273523, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQACjaoDOCXkFkcN&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dcf19809dd005d498638%2F5c68b2a57cab7e35a5e8f5b773152a5e109394bd_22df6d5faf11f33cc50e308c256b76e777c5ec58_facebook&cfs=1&_nc_hash=AQBwIFadr-2O_lob", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDuj5H_nVm_5Yrb&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dcf19809dd005d498638%2F5c68b2a57cab7e35a5e8f5b773152a5e109394bd_22df6d5faf11f33cc50e308c256b76e777c5ec58_facebook&_nc_hash=AQCBL_eis9doenDw", - } - ], - "statistics": { - "actual": { - "likeCount": 151, - "shareCount": 741, - "commentCount": 939, - "loveCount": 1, - "wowCount": 179, - "hahaCount": 510, - "sadCount": 57, - "angryCount": 1061, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 142, - "shareCount": 65, - "commentCount": 128, - "loveCount": 10, - "wowCount": 17, - "hahaCount": 48, - "sadCount": 8, - "angryCount": 39, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13489, - "name": "The Daily Caller", - "handle": "DailyCaller", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471", - "subscriberCount": 5408115, - "url": "https://www.facebook.com/182919686769", - "platform": "Facebook", - "platformId": "182919686769", - "verified": True, - }, - }, - { - "id": 70172724809, - "platformId": "319569361390023_3033649303315335", - "platform": "Facebook", - "date": "2019-09-07 23:30:05", - "updated": "2019-09-08 00:33:02", - "type": "link", - "title": "David Hogg: White people feel it's American to pick up of gun because of their xenophobia", - "caption": "twitchy.com", - "description": "It's always good to hear David Hogg and Chris Hayes discuss white gun owners.", - "message": 'Maybe some white people don\'t want to "explore" being robbed or raped.', - "expandedLinks": [ - { - "original": "https://twitchy.com/brettt-3136/2019/09/07/david-hogg-white-people-feel-its-american-to-pick-up-of-gun-because-of-their-xenophobia/?utm_content=buffer6a4a5&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer", - "expanded": "https://twitchy.com/brettt-3136/2019/09/07/david-hogg-white-people-feel-its-american-to-pick-up-of-gun-because-of-their-xenophobia/?utm_content=buffer6a4a5&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer", - } - ], - "link": "https://twitchy.com/brettt-3136/2019/09/07/david-hogg-white-people-feel-its-american-to-pick-up-of-gun-because-of-their-xenophobia/?utm_content=buffer6a4a5&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer", - "postUrl": "https://www.facebook.com/TeamTwitchy/posts/3033649303315335", - "subscriberCount": 1230972, - "score": 
7.857142857142857, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQB7bT4nl7lkv-Df&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d743818b7ba8c1f095735f2%2F13d92b6c4918c5e371fb73d839e5ebf767c467fa_02cfbdaa9db52496964c2f8d6d16b3195a9de7a6_facebook&cfs=1&_nc_hash=AQCxNnhZKprHuha9", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2zIJAVELVhdO7&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d743818b7ba8c1f095735f2%2F13d92b6c4918c5e371fb73d839e5ebf767c467fa_02cfbdaa9db52496964c2f8d6d16b3195a9de7a6_facebook&_nc_hash=AQAuc9IHR5rV3WRK", - } - ], - "statistics": { - "actual": { - "likeCount": 23, - "shareCount": 27, - "commentCount": 206, - "loveCount": 1, - "wowCount": 3, - "hahaCount": 85, - "sadCount": 2, - "angryCount": 38, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 8, - "shareCount": 5, - "commentCount": 10, - "loveCount": 2, - "wowCount": 3, - "hahaCount": 12, - "sadCount": 2, - "angryCount": 7, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13491, - "name": "Twitchy", - "handle": "TeamTwitchy", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1510706_1291332407547042_3464451234671669539_n.jpg?_nc_cat=104&_nc_oc=AQlx0XkhJRril3GnGFgvw8EB7HLdxYW1VUJdclk_vtkSyD2YU5sgdJRr3b2u_ybasgg&_nc_ht=scontent.xx&oh=4c3a5afcfd13d7b38b6853399aa44529&oe=5E0B1BA3", - "subscriberCount": 1230881, - "url": "https://www.facebook.com/319569361390023", - "platform": "Facebook", - "platformId": "319569361390023", - "verified": True, - }, - }, - { - "id": 70174473556, - "platformId": "134486075205_10163969337575206", - "platform": "Facebook", - "date": "2019-09-08 00:06:06", - "updated": "2019-09-08 00:43:59", - "type": "link", - "title": "AOC demands Trump’s impeachment amid probe into his Scottish resort", - "caption": "nypost.com", - "description": "Rep. Alexandria Ocasio-Cortez has issued a fresh call to impeach President Trump amid reports of a House investigation into a military stop at his Scottish golf club. 
“The President is corrupt and …", - "message": "“The President is corrupt and must be impeached,” the freshman Democrat posted on Twitter Friday.", - "expandedLinks": [ - { - "original": "https://trib.al/645cHyB", - "expanded": "https://nypost.com/2019/09/07/aoc-demands-trumps-impeachment-amid-probe-into-his-scottish-resort/?sr_share=facebook&utm_source=NYPFacebook&utm_medium=SocialFlow&utm_campaign=SocialFlow", - } - ], - "link": "https://trib.al/645cHyB", - "postUrl": "https://www.facebook.com/NYPost/posts/10163969337575206", - "subscriberCount": 4182920, - "score": 7.796610169491525, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDn49MSINxbWE43&w=720&h=720&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2019%2F09%2Faoc-demands-trump-impeachment.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&cfs=1&_nc_hash=AQBVKLCAMMKs3Qot", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCDfTkvw58OC2Sh&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2019%2F09%2Faoc-demands-trump-impeachment.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&_nc_hash=AQCPwMe3FJbBiyK5", - } - ], - "statistics": { - "actual": { - "likeCount": 231, - "shareCount": 94, - "commentCount": 259, - "loveCount": 51, - "wowCount": 4, - "hahaCount": 255, - "sadCount": 0, - "angryCount": 26, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 28, - "shareCount": 22, - "commentCount": 30, - "loveCount": 3, - "wowCount": 8, - "hahaCount": 14, - "sadCount": 6, - "angryCount": 7, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10342, - "name": "New York Post", - "handle": "NYPost", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12932928_10157483552025206_1176575955706691041_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnPmbZuC7S1v1NTPRZ7rWQU4EucwAW3nKx-aXD0PzlPsD3ifQpdaLcXEegH730Wy_o&_nc_ht=scontent.xx&oh=c77d86309611fa2972df1979bf6cab9e&oe=5E0827CA", - "subscriberCount": 4183079, - "url": "https://www.facebook.com/134486075205", - "platform": "Facebook", - "platformId": "134486075205", - "verified": True, - }, - }, - { - "id": 70166864447, - "platformId": "268914272540_10156464324502541", - "platform": "Facebook", - "date": "2019-09-07 22:00:07", - "updated": "2019-09-08 00:30:46", - "type": "link", - "title": "Undocumented man who cooperated in case against Jamaican kingpin Christopher ‘Dudus’ Coke fights deadly deportation", - "caption": "nydailynews.com", - "description": " ", - "message": "An undocumented man who helped the government bring down one of Jamaica’s most notorious drug kingpins faces a deportation death sentence. 
The immigrant, identified in court papers only as Sean B., was a cooperating witness in the case against Christopher “Dudus” Coke in 2011, which made him a marked man.", - "expandedLinks": [ - { - "original": "https://trib.al/awiaROM", - "expanded": "https://www.nydailynews.com/new-york/ny-coke-cooperator-deportation-20190907-2bgpnpfbpzearlsg4z6raexrye-story.html", - } - ], - "link": "https://trib.al/awiaROM", - "postUrl": "https://www.facebook.com/NYDailyNews/posts/10156464324502541", - "subscriberCount": 3119682, - "score": 7.7164179104477615, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCb_uyXtgvEH8YO&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2FzwNc3qlGv_FRTNABd4zyhmfutEU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FG27YK3ENX5FAXIEFQ7D3DHWYNI.jpg&cfs=1&sx=154&sy=0&sw=869&sh=869&_nc_hash=AQDIB2IEwrFbBQfH", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD8pD_gt7qTTxhB&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2FzwNc3qlGv_FRTNABd4zyhmfutEU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FG27YK3ENX5FAXIEFQ7D3DHWYNI.jpg&_nc_hash=AQDdkbiscfR0BlCW", - } - ], - "statistics": { - "actual": { - "likeCount": 114, - "shareCount": 456, - "commentCount": 128, - "loveCount": 2, - "wowCount": 103, - "hahaCount": 23, - "sadCount": 23, - "angryCount": 185, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 33, - "shareCount": 30, - "commentCount": 23, - "loveCount": 4, - "wowCount": 10, - "hahaCount": 14, - "sadCount": 6, - "angryCount": 14, - "thankfulCount": 0, - }, - }, - "account": { - "id": 18752, - "name": "New York Daily News", - "handle": "NYDailyNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1", - "subscriberCount": 3120017, - "url": "https://www.facebook.com/268914272540", - "platform": "Facebook", - "platformId": "268914272540", - "verified": True, - }, - }, - { - "id": 70160607290, - "platformId": "20446254070_10156890348774071", - "platform": "Facebook", - "date": "2019-09-07 20:05:01", - "updated": "2019-09-07 23:47:08", - "type": "link", - "title": "A vegan sued her neighbors for cooking meat in their backyard, and now thousands are planning a barbecue just to annoy her", - "caption": "insider.com", - "description": "Cilla Carden, a vegan from Perth, Australia, recently sued her neighbors for having barbecues in their backyard, among other complaints.", - "message": "The courts dismissed her case and denied her appeal, but Carden said she wouldn't stop fighting.", - "expandedLinks": [ - { - "original": "https://bit.ly/2HRRtMt", - "expanded": "https://www.insider.com/vegan-sued-neighbors-cooking-meat-in-their-backyard-2019-9?utm_content=buffer3311a&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer-bi", - } - ], - "link": "https://bit.ly/2HRRtMt", - "postUrl": "https://www.facebook.com/businessinsider/posts/10156890348774071", - "subscriberCount": 9107012, - "score": 7.408163265306122, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQDLlBBaIbf2Ofn8&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d727375508525086669bc32%2F23f2f8b77ee25719c0d7fbda1cd415481f2fc4ac_e87e2781797f5cbd3c789c90679fc3b18e6ad997_facebook&cfs=1&_nc_hash=AQAdosUrYP86btQh", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAdBjnZNFZjH3HL&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d727375508525086669bc32%2F23f2f8b77ee25719c0d7fbda1cd415481f2fc4ac_e87e2781797f5cbd3c789c90679fc3b18e6ad997_facebook&_nc_hash=AQAd_QWYfBovdu0G", - } - ], - "statistics": { - "actual": { - "likeCount": 61, - "shareCount": 83, - "commentCount": 51, - "loveCount": 6, - "wowCount": 15, - "hahaCount": 144, - "sadCount": 0, - "angryCount": 3, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 12, - "shareCount": 6, - "commentCount": 6, - "loveCount": 3, - "wowCount": 3, - "hahaCount": 7, - "sadCount": 5, - "angryCount": 7, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6648, - "name": "Business Insider", - "handle": "businessinsider", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9", - "subscriberCount": 9107575, - "url": "https://www.facebook.com/20446254070", - "platform": "Facebook", - "platformId": "20446254070", - "verified": True, - }, - }, - { - "id": 70160757733, - "platformId": "21516776437_10157334747216438", - "platform": "Facebook", - "date": "2019-09-07 20:00:15", - "updated": "2019-09-08 00:43:36", - "type": "link", - "title": "Trump Lost #Sharpiegate the Moment He Took It Seriously", - "caption": "slate.com", - "description": "There may be a limit to bending reality.", - "message": "Not even Fox News is going along with it.", - "expandedLinks": [ - { - "original": "https://slate.trib.al/gim6IVr", - "expanded": "https://slate.com/news-and-politics/2019/09/trumps-sharpie-defense-hurricane-dorian.html?via=rss_socialflow_facebook", - } - ], - "link": "https://slate.trib.al/gim6IVr", - "postUrl": "https://www.facebook.com/Slate/posts/10157334747216438", - "subscriberCount": 1518914, - "score": 7.38, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCq3vaUbv_4TMBr&w=720&h=720&url=https%3A%2F%2Fcompote.slate.com%2Fimages%2F4fe737a0-b801-46af-a378-286301f8b58f.jpeg%3Fwidth%3D780%26height%3D520%26rect%3D1248x832%26offset%3D0x0&cfs=1&_nc_hash=AQDS81aJDFJLV-Al", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBzEKXqdQ0KNLMY&url=https%3A%2F%2Fcompote.slate.com%2Fimages%2F4fe737a0-b801-46af-a378-286301f8b58f.jpeg%3Fwidth%3D780%26height%3D520%26rect%3D1248x832%26offset%3D0x0&_nc_hash=AQCwx4_3HgJfmfIR", - } - ], - "statistics": { - "actual": { - "likeCount": 162, - "shareCount": 44, - "commentCount": 28, - "loveCount": 11, - "wowCount": 2, - "hahaCount": 118, - "sadCount": 1, - "angryCount": 3, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 10, - "shareCount": 6, - "commentCount": 8, - "loveCount": 3, - "wowCount": 3, - "hahaCount": 4, - "sadCount": 5, - "angryCount": 11, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6631, - "name": "Slate.com", - "handle": "Slate", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26815412_10155867835401438_6786592847511925697_n.jpg?_nc_cat=1&_nc_oc=AQnlPqxpF8HJHZLBBP9M3JCvr7KRojNU13Gek2aIDlLStNh3FwBSADznEiZCEG1_doE&_nc_ht=scontent.xx&oh=fa5bf2320fbcba9484de00ac7f908e6c&oe=5DC8F5CA", - "subscriberCount": 1518896, - "url": "https://www.facebook.com/21516776437", - "platform": "Facebook", - "platformId": "21516776437", - "verified": True, - }, - }, - { - "id": 70175280754, - "platformId": "25987609066_10156789085049067", - "platform": "Facebook", - "date": "2019-09-08 00:05:04", - "updated": "2019-09-08 00:26:32", - "type": "link", - "title": "Trump says he was about to hold secret talks with the Taliban in the US, but canceled them", - "caption": "nbcnews.com", - "description": " ", - "message": "BREAKING: Days ahead of 9/11 anniversary, Pres. Trump announces that he was set to hold secret talks with the Taliban at Camp David in the US this weekend but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://trib.al/VYsjQl1", - "expandedLinks": [ - { - "original": "https://trib.al/VYsjQl1", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141", - }, - { - "original": "https://trib.al/VYsjQl1", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141", - }, - ], - "link": "https://trib.al/VYsjQl1", - "postUrl": "https://www.facebook.com/therachelmaddowshow/posts/10156789085049067", - "subscriberCount": 2643600, - "score": 7.22234762979684, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 228, - "shareCount": 1043, - "commentCount": 1616, - "loveCount": 2, - "wowCount": 578, - "hahaCount": 1124, - "sadCount": 63, - "angryCount": 1745, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 221, - "shareCount": 184, - "commentCount": 118, - "loveCount": 10, - "wowCount": 31, - "hahaCount": 14, - "sadCount": 43, - "angryCount": 265, - "thankfulCount": 0, - }, - }, - "account": { - "id": 3921, - "name": "The Rachel Maddow Show", - "handle": "therachelmaddowshow", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/560412_10150641324209067_326441500_n.jpg?_nc_cat=1&_nc_oc=AQll8voihNLTZqhNJxHo54RezqGbTpA2ADMeAJ0m1c--3__ynoI3yGrzvpSMzT6QrNI&_nc_ht=scontent.xx&oh=8f8d327dc4a47e1af85f9d3da82d4eb3&oe=5DFB0DA7", - "subscriberCount": 2643600, - "url": "https://www.facebook.com/25987609066", - "platform": "Facebook", - "platformId": "25987609066", - "verified": True, - }, - }, - { - "id": 70161490237, - "platformId": "86680728811_10158783373048812", - "platform": "Facebook", - "date": "2019-09-07 20:10:39", - "updated": "2019-09-08 00:45:25", - "type": "link", - "title": "241 NYPD officers have died from 9/11 illnesses, 10 times the number killed at WTC", - "caption": "abcnews.go.com", - "description": 
" ", - "message": "To date, 241 members of the NYPD died of 9/11-related illnesses – compared to the 23 killed in the attack on the World Trade Center.", - "expandedLinks": [ - { - "original": "https://abcn.ws/34uNH5d", - "expanded": "https://abcnews.go.com/US/241-nypd-officers-died-911-illnesses-10-times/story?id=65430201&cid=social_fb_abcn", - } - ], - "link": "https://abcn.ws/34uNH5d", - "postUrl": "https://www.facebook.com/ABCNews/posts/10158783373048812", - "subscriberCount": 14195962, - "score": 7.180878552971576, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fnypd-ceremonial-wall-gty-jc-190906_hpMain_16x9_992.jpg&cfs=1&sx=188&sy=0&sw=558&sh=558&_nc_hash=AQDss5F9oj_ddJeI", - "height": 558, - "width": 558, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBfjZ1NKJj5a-AY&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fnypd-ceremonial-wall-gty-jc-190906_hpMain_16x9_992.jpg&_nc_hash=AQBQPg2M4kC-Vv9c", - } - ], - "statistics": { - "actual": { - "likeCount": 254, - "shareCount": 925, - "commentCount": 104, - "loveCount": 9, - "wowCount": 120, - "hahaCount": 0, - "sadCount": 1324, - "angryCount": 43, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 161, - "shareCount": 77, - "commentCount": 64, - "loveCount": 20, - "wowCount": 20, - "hahaCount": 17, - "sadCount": 17, - "angryCount": 11, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - }, - { - "id": 70166864446, - "platformId": "268914272540_10156464339042541", - "platform": "Facebook", - "date": "2019-09-07 22:06:14", - "updated": "2019-09-08 00:30:46", - "type": "link", - "title": "Bianca Andreescu stuns Serena Williams to deny tennis legend a 24th Grand Slam title with straight-sets win in U.S. Open final", - "caption": "nydailynews.com", - "description": "It was a stunning straight-set romp.", - "message": "Bianca Andreescu stuns Serena Williams to deny tennis legend a 24th Grand Slam title with straight-sets win in U.S. Open final Canadian upstart Andreescu, 19, got the best of the American superstar in a convincing 6-3, 7-5 display at the U.S. 
Open.", - "expandedLinks": [ - { - "original": "https://www.nydailynews.com/sports/more-sports/ny-serena-williams-bianca-andreescu-us-open-final-20190907-6d5z6gyxajftdbwdw2ifgovzde-story.html", - "expanded": "https://www.nydailynews.com/sports/more-sports/ny-serena-williams-bianca-andreescu-us-open-final-20190907-6d5z6gyxajftdbwdw2ifgovzde-story.html", - } - ], - "link": "https://www.nydailynews.com/sports/more-sports/ny-serena-williams-bianca-andreescu-us-open-final-20190907-6d5z6gyxajftdbwdw2ifgovzde-story.html", - "postUrl": "https://www.facebook.com/NYDailyNews/posts/10156464339042541", - "subscriberCount": 3119682, - "score": 7.0894308943089435, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQD4L3C7ugtRqaZf&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F0hvHjy5wJQkhjkPtFeNbb3im7GY%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2F6W6GX2O4NFD6XOIHBY7OJR5DXY.jpg&cfs=1&_nc_hash=AQCtd3fvuMA0bX4a", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBd-FBpxDYozc6-&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F0hvHjy5wJQkhjkPtFeNbb3im7GY%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2F6W6GX2O4NFD6XOIHBY7OJR5DXY.jpg&_nc_hash=AQCdij0z_1ihuAiP", - } - ], - "statistics": { - "actual": { - "likeCount": 260, - "shareCount": 184, - "commentCount": 135, - "loveCount": 25, - "wowCount": 90, - "hahaCount": 20, - "sadCount": 151, - "angryCount": 7, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 29, - "shareCount": 27, - "commentCount": 22, - "loveCount": 4, - "wowCount": 9, - "hahaCount": 13, - "sadCount": 6, - "angryCount": 13, - "thankfulCount": 0, - }, - }, - "account": { - "id": 18752, - "name": "New York Daily News", - "handle": "NYDailyNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1", - "subscriberCount": 3120017, - "url": "https://www.facebook.com/268914272540", - "platform": "Facebook", - "platformId": "268914272540", - "verified": True, - }, - }, - { - "id": 70161756319, - "platformId": "228735667216_10156998679937217", - "platform": "Facebook", - "date": "2019-09-07 20:25:55", - "updated": "2019-09-08 00:25:08", - "type": "link", - "title": "Amber Rudd resigns from cabinet", - "caption": "bbc.com", - "description": " ", - "message": "UK Work and Pensions Secretary Amber Rudd resigns from the government saying she cannot stand by while loyal Conservatives are expelled.", - "expandedLinks": [ - { - "original": "https://bbc.in/34y74KD", - "expanded": "https://www.bbc.com/news/uk-politics-49623737?ns_mchannel=social&ns_campaign=bbcnews&ns_source=facebook&ocid=socialflow_facebook&fbclid=IwAR0Y5wtV7XAYMOy9za8iQR-hQ_ed0zhqD9ieNWR91SwEzNn0f9V8qZBpAZ8", - } - ], - "link": "https://bbc.in/34y74KD", - "postUrl": "https://www.facebook.com/bbcnews/posts/10156998679937217", - "subscriberCount": 49392159, - "score": 6.990017615971815, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDVrCPNmQJJR5cv&w=720&h=720&url=https%3A%2F%2Fichef.bbci.co.uk%2Fnews%2F1024%2Fbranded_news%2F7A23%2Fproduction%2F_97176213_breaking_news_bigger.png&cfs=1&_nc_hash=AQDn5vmvnE_HCobw", - "height": 720, - "width": 720, - "full": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQCC5QUIIVk7Ey9w&url=https%3A%2F%2Fichef.bbci.co.uk%2Fnews%2F1024%2Fbranded_news%2F7A23%2Fproduction%2F_97176213_breaking_news_bigger.png&_nc_hash=AQDIOh8BQjgML1G9", - } - ], - "statistics": { - "actual": { - "likeCount": 5261, - "shareCount": 1809, - "commentCount": 2123, - "loveCount": 298, - "wowCount": 724, - "hahaCount": 1589, - "sadCount": 57, - "angryCount": 43, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 876, - "shareCount": 225, - "commentCount": 277, - "loveCount": 45, - "wowCount": 77, - "hahaCount": 93, - "sadCount": 63, - "angryCount": 47, - "thankfulCount": 0, - }, - }, - "account": { - "id": 16403, - "name": "BBC News", - "handle": "bbcnews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/67191311_10156857876272217_4342089529688064000_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQk5kAdrSFMzze_w-lzADmQENwckqsjInhGPXnxTYNgxJpQ7siiGF44i0wivzxfUmPw&_nc_ht=scontent.xx&oh=5b9721d79e733db34cd496e566100993&oe=5DF5BFA1", - "subscriberCount": 49397882, - "url": "https://www.facebook.com/228735667216", - "platform": "Facebook", - "platformId": "228735667216", - "verified": True, - }, - }, - { - "id": 70173549413, - "platformId": "219367258105115_3290988857609591", - "platform": "Facebook", - "date": "2019-09-07 23:34:33", - "updated": "2019-09-08 00:41:04", - "type": "link", - "title": "Trump says Taliban leaders were coming to the US for a Camp David meeting but he canceled it", - "caption": "cnn.com", - "description": " ", - "message": "BREAKING: President Trump says he canceled a secret meeting at Camp David tomorrow with Taliban leaders and Afghanistan's President.", - "expandedLinks": [ - { - "original": "https://cnn.it/2LwOn1r", - "expanded": "https://www.cnn.com/2019/09/07/politics/trump-cancels-secret-meeting-taliban-afghanistan-president/index.html?utm_content=2019-09-07T23%3A34%3A31&utm_source=fbCNNp&utm_medium=social&utm_term=link", - } - ], - "link": "https://cnn.it/2LwOn1r", - "postUrl": "https://www.facebook.com/cnnpolitics/posts/3290988857609591", - "subscriberCount": 2855492, - "score": 6.918644067796611, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAWT0uLyIBSF_0v&w=619&h=619&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190905172550-02-trump-medal-of-freedom-0905-super-tease.jpg&cfs=1&sx=184&sy=0&sw=619&sh=619&_nc_hash=AQD2RHNKQazn6ZvL", - "height": 619, - "width": 619, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBndecYvtod0WTA&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190905172550-02-trump-medal-of-freedom-0905-super-tease.jpg&_nc_hash=AQC-DN0i4-h1BxQZ", - } - ], - "statistics": { - "actual": { - "likeCount": 108, - "shareCount": 422, - "commentCount": 601, - "loveCount": 3, - "wowCount": 120, - "hahaCount": 575, - "sadCount": 10, - "angryCount": 202, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 75, - "shareCount": 40, - "commentCount": 99, - "loveCount": 10, - "wowCount": 7, - "hahaCount": 27, - "sadCount": 7, - "angryCount": 30, - "thankfulCount": 0, - }, - }, - "account": { - "id": 19471, - "name": "CNN Politics", - "handle": "cnnpolitics", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22450067_1835100979865060_6024097554775073207_n.png?_nc_cat=1&_nc_oc=AQmpWGKTrzg30Lmmy5ncZ5txlFyDirtObkp2leejFgez6t02RAflIlctecGiymX0NU8&_nc_ht=scontent.xx&oh=bbc41bdb10ef689246595025fc23b309&oe=5E070315", - "subscriberCount": 2855693, - "url": "https://www.facebook.com/219367258105115", - 
"platform": "Facebook", - "platformId": "219367258105115", - "verified": True, - }, - }, - { - "id": 70159788131, - "platformId": "210277954204_10156789441434205", - "platform": "Facebook", - "date": "2019-09-07 19:41:48", - "updated": "2019-09-08 00:39:40", - "type": "youtube", - "caption": "youtube.com", - "description": "Donald Trump can't handle Alexandria Ocasio-Cortez and The Squad; here is a perfect example. John Iadarola and Jayar Jackson break it down on The Damage Repo...", - "expandedLinks": [ - { - "original": "https://www.youtube.com/watch?v=TjVYOzfrpkc&feature=youtu.be", - "expanded": "https://www.youtube.com/watch?v=TjVYOzfrpkc&feature=youtu.be", - } - ], - "link": "https://www.youtube.com/watch?v=TjVYOzfrpkc&feature=youtu.be", - "postUrl": "https://www.facebook.com/TheYoungTurks/posts/10156789441434205", - "subscriberCount": 2099948, - "score": 6.682242990654205, - "media": [ - { - "type": "video", - "url": "https://www.youtube.com/embed/TjVYOzfrpkc?autoplay=1", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDcJ1B_OysiE_LK&w=720&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2FTjVYOzfrpkc%2Fmaxresdefault.jpg&cfs=1&sx=261&sy=0&sw=720&sh=720&_nc_hash=AQBiAQ0_vQwsDcOq", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBgnnPU_l3lgrgU&w=1280&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2FTjVYOzfrpkc%2Fmaxresdefault.jpg&crop&sx=0&sy=0&sw=1280&sh=720&_nc_hash=AQApjZWx0m_zKiJR", - }, - ], - "statistics": { - "actual": { - "likeCount": 305, - "shareCount": 87, - "commentCount": 45, - "loveCount": 56, - "wowCount": 2, - "hahaCount": 217, - "sadCount": 1, - "angryCount": 2, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 33, - "shareCount": 17, - "commentCount": 19, - "loveCount": 7, - "wowCount": 3, - "hahaCount": 15, - "sadCount": 4, - "angryCount": 9, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6786, - "name": "The Young Turks", - "handle": "TheYoungTurks", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5", - "subscriberCount": 2100186, - "url": "https://www.facebook.com/210277954204", - "platform": "Facebook", - "platformId": "210277954204", - "verified": True, - }, - }, - { - "id": 70162969192, - "platformId": "140738092630206_2613192798718044", - "platform": "Facebook", - "date": "2019-09-07 20:50:02", - "updated": "2019-09-08 00:19:09", - "type": "link", - "title": "Bystanders taunted and laughed as police officers were being fired upon in Philadelphia", - "caption": "theblaze.com", - "description": '"A major moment of disappointment..."', - "message": "Just sad.", - "expandedLinks": [ - { - "original": "https://bit.ly/2A1JS9y", - "expanded": "https://www.theblaze.com/news/philadelphia-bystanders-mocked-police-during-shooting?utm_content=buffer63598&utm_medium=organic&utm_source=facebook&utm_campaign=fb-theblaze", - } - ], - "link": "https://bit.ly/2A1JS9y", - "postUrl": "https://www.facebook.com/TheBlaze/posts/2613192798718044", - "subscriberCount": 2089159, - "score": 6.564102564102564, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQB1eQyjd6fQFb9f&w=720&h=720&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F20567472%252F1200x600.jpg%2F2000%252C2000%2FDU%252BXWLoZnuoOJS6M%2Fimg.jpg&cfs=1&sx=460&sy=0&sw=1000&sh=1000&_nc_hash=AQDBlQqCoMv6kpnn", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAaCa9qpoHlPzOt&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F20567472%252F1200x600.jpg%2F2000%252C2000%2FDU%252BXWLoZnuoOJS6M%2Fimg.jpg&_nc_hash=AQB6ITZyK5kS6QhT", - } - ], - "statistics": { - "actual": { - "likeCount": 153, - "shareCount": 675, - "commentCount": 245, - "loveCount": 2, - "wowCount": 31, - "hahaCount": 9, - "sadCount": 255, - "angryCount": 1190, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 71, - "shareCount": 71, - "commentCount": 99, - "loveCount": 5, - "wowCount": 18, - "hahaCount": 46, - "sadCount": 9, - "angryCount": 71, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6892, - "name": "TheBlaze", - "handle": "TheBlaze", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/47350623_2141870595850269_7864140219111440384_n.png?_nc_cat=1&_nc_oc=AQmGyVQswjmmaInAkgMKbLJ62jAcb2BShbL78435-MqCEBLedhKr7VO97Nzxt2x220k&_nc_ht=scontent.xx&oh=4a5ce0b44b6400aab9bb78aa2afdee87&oe=5E011864", - "subscriberCount": 2089166, - "url": "https://www.facebook.com/140738092630206", - "platform": "Facebook", - "platformId": "140738092630206", - "verified": True, - }, - }, - { - "id": 70175231621, - "platformId": "167115176655082_2995005573866014", - "platform": "Facebook", - "date": "2019-09-08 00:00:02", - "updated": "2019-09-08 00:25:19", - "type": "link", - "title": "Experts Want to Give Control of America's Nuclear Missiles to AI", - "caption": "vice.com", - "description": "If America is attacked with a nuclear bomb, artificial intelligence would automatically fire back even if we are all dead.", - "message": "This is a terrible idea.", - "expandedLinks": [ - { - "original": "https://www.vice.com/en_us/article/59n3y5/experts-want-to-give-control-of-americas-nuclear-missiles-to-ai?utm_source=vicefbus", - "expanded": "https://www.vice.com/en_us/article/59n3y5/experts-want-to-give-control-of-americas-nuclear-missiles-to-ai?utm_source=vicefbus", - } - ], - "link": "https://www.vice.com/en_us/article/59n3y5/experts-want-to-give-control-of-americas-nuclear-missiles-to-ai?utm_source=vicefbus", - "postUrl": "https://www.facebook.com/VICE/posts/2995005573866014", - "subscriberCount": 8174144, - "score": 6.5423728813559325, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQApMNUEyhs8JYQj&w=720&h=720&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d6ea91f390935000a9a7623%2Flede%2F1567533843082-GettyImages-525450811.jpeg%3Fcrop%3D1xw%3A0.999873031995937xh%3Bcenter%2Ccenter%26resize%3D1200%3A%2A&cfs=1&_nc_hash=AQANsmy8nKq7WOUE", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCAk4P30RJ3YPGZ&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d6ea91f390935000a9a7623%2Flede%2F1567533843082-GettyImages-525450811.jpeg%3Fcrop%3D1xw%3A0.999873031995937xh%3Bcenter%2Ccenter%26resize%3D1200%3A%2A&_nc_hash=AQBolN324-Kx3RDV", - } - ], - "statistics": { - "actual": { - "likeCount": 47, - "shareCount": 93, - "commentCount": 119, - "loveCount": 4, - "wowCount": 14, - "hahaCount": 55, - "sadCount": 4, - "angryCount": 50, - "thankfulCount": 0, - }, - "expected": { - 
"likeCount": 18, - "shareCount": 8, - "commentCount": 13, - "loveCount": 4, - "wowCount": 4, - "hahaCount": 7, - "sadCount": 2, - "angryCount": 3, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6646, - "name": "VICE", - "handle": "VICE", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13427861_1304295039603751_2178102892370936049_n.jpg?_nc_cat=1&_nc_oc=AQmzoEUjC5BCCMVSsIFvWa52KGr3Iqh9f0Y_eezqYMFw7h_EUam7WQdYxEFvJB6LoP0&_nc_ht=scontent.xx&oh=847f8eb6c5132c90382bc0940afbc692&oe=5E02C5BA", - "subscriberCount": 8177544, - "url": "https://www.facebook.com/167115176655082", - "platform": "Facebook", - "platformId": "167115176655082", - "verified": True, - }, - }, - { - "id": 70157699253, - "platformId": "15704546335_10158452982671336", - "platform": "Facebook", - "date": "2019-09-07 19:00:06", - "updated": "2019-09-08 00:36:22", - "type": "link", - "title": "University of Tennessee turns bullied elementary school student's t-shirt design into official apparel", - "caption": "foxnews.com", - "description": "The young University of Tennessee fan that was bullied last week for the homemade shirt he wore to his school’s collegiate day was shown an outpouring of support that has since inspired the university to make his design into an official piece of apparel.", - "message": "A University of Tennessee fan that was bullied for his homemade shirt has inspired the university to make his design into an official piece of apparel.", - "expandedLinks": [ - { - "original": "https://www.foxnews.com/us/university-of-tennessee-bullied-student-t-shirt-design", - "expanded": "https://www.foxnews.com/us/university-of-tennessee-bullied-student-t-shirt-design", - } - ], - "link": "https://www.foxnews.com/us/university-of-tennessee-bullied-student-t-shirt-design", - "postUrl": "https://www.facebook.com/FoxNews/posts/10158452982671336", - "subscriberCount": 17162352, - "score": 6.431667403803627, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBK4qdC7-IEHKlP&w=720&h=720&url=https%3A%2F%2Fstatic.foxnews.com%2Ffoxnews.com%2Fcontent%2Fuploads%2F2019%2F09%2FVOLS.jpg&cfs=1&_nc_hash=AQABy0BtCZZ24GnU", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCfDxKseiGNUbrW&url=https%3A%2F%2Fstatic.foxnews.com%2Ffoxnews.com%2Fcontent%2Fuploads%2F2019%2F09%2FVOLS.jpg&_nc_hash=AQAW52maPsiatQPC", - } - ], - "statistics": { - "actual": { - "likeCount": 8315, - "shareCount": 2003, - "commentCount": 552, - "loveCount": 3573, - "wowCount": 43, - "hahaCount": 48, - "sadCount": 4, - "angryCount": 4, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 730, - "shareCount": 391, - "commentCount": 635, - "loveCount": 69, - "wowCount": 107, - "hahaCount": 130, - "sadCount": 52, - "angryCount": 147, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6897, - "name": "Fox News", - "handle": "FoxNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22519337_10156158270486336_6810712156586627746_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlXNhWwb8bfCyDXwZo8N1dsslewpEwDTilUVrDkK4ie4qoq_SHj__a9Ws-O0Hsa97M&_nc_ht=scontent.xx&oh=485819e2e49151fcf033722359d3e1a7&oe=5DFF0F55", - "subscriberCount": 17163279, - "url": "https://www.facebook.com/15704546335", - "platform": "Facebook", - "platformId": "15704546335", - "verified": True, - }, - }, - { - "id": 70175836725, - "platformId": "475549362567960_2304081899714688", - "platform": "Facebook", - "date": "2019-09-08 00:00:13", - "updated": "2019-09-08 00:40:20", - "type": 
"native_video", - "description": 'Socialist Rep. Alexandria Ocasio-Cortez (D-N.Y.) gets a hard lesson on free speech from a FED-UP Trump supporter.\n\n"You\'re bringing incitement and hate speech together, which is totally absurd and fascist!" 🔥🔥🔥', - "message": '"You\'re bringing incitement and hate speech together, which is totally absurd and fascist!" 🔥🔥🔥', - "expandedLinks": [ - { - "original": "https://www.facebook.com/RantNationBlazeTV/videos/464956854293046/", - "expanded": "https://www.facebook.com/RantNationBlazeTV/videos/464956854293046/", - } - ], - "link": "https://www.facebook.com/RantNationBlazeTV/videos/464956854293046/", - "postUrl": "https://www.facebook.com/ConservativeReview/posts/2304081899714688", - "subscriberCount": 1534190, - "score": 6.382978723404255, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/61970984_386455858745544_3407560622387232768_n.mp4?_nc_cat=100&efg=eyJybHIiOjM4OCwicmxhIjoxMjU0LCJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_oc=AQn_aFAILPRkh2IRUK8n3eD_2E-dd1HgkPtOeVszpkraWTkobKQ6qsDQtgjy6Fb5KdI&rl=388&vabr=216&_nc_ht=video.xx&oh=1394caeb743e2e2e01dd18b9fd97e2bf&oe=5D768C8B", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/s720x720/60887318_389396874998383_6277103585096368128_n.jpg?_nc_cat=107&_nc_oc=AQmaErj0jsp9aj0ykhWYX6QQUirRB-KbJhG71aByptbfEBbJ2wtnzwWBVljlckXSlMY&_nc_ht=scontent.xx&oh=827d6e3a1fe6a8feef392c2bdfbee2bf&oe=5E11CCF4", - "height": 405, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/60887318_389396874998383_6277103585096368128_n.jpg?_nc_cat=107&_nc_oc=AQmaErj0jsp9aj0ykhWYX6QQUirRB-KbJhG71aByptbfEBbJ2wtnzwWBVljlckXSlMY&_nc_ht=scontent.xx&oh=e6c5f31f225cfa18771e516e829b686f&oe=5E0AB3BC", - }, - ], - "statistics": { - "actual": { - "likeCount": 329, - "shareCount": 93, - "commentCount": 62, - "loveCount": 43, - "wowCount": 4, - "hahaCount": 66, - "sadCount": 0, - "angryCount": 3, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 44, - "shareCount": 17, - "commentCount": 9, - "loveCount": 5, - "wowCount": 4, - "hahaCount": 4, - "sadCount": 3, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 400323, - "name": "Conservative Review", - "handle": "ConservativeReview", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13511965_914218092034416_3120935637616696621_n.png?_nc_cat=1&_nc_oc=AQmwEB1vd_Ss605EjbSWDYSwe1KWV44N7RpDVd5HB--hRPK-_AmZmVSl498baoafRac&_nc_ht=scontent.xx&oh=1ea31cc2116c322669c75f2180fc2684&oe=5E01B826", - "subscriberCount": 1534190, - "url": "https://www.facebook.com/475549362567960", - "platform": "Facebook", - "platformId": "475549362567960", - "verified": True, - }, - "videoLengthMS": 309779, - }, - { - "id": 70158011942, - "platformId": "86680728811_10158783168978812", - "platform": "Facebook", - "date": "2019-09-07 19:03:29", - "updated": "2019-09-08 00:45:25", - "type": "native_video", - "message": "More than a thousand Bahamians evacuated by the Grand Celebration cruise ship arrived in Palm beach, Florida, after it dropped more than 112 tons of supplies in Freeport, Grand Bahama in the wake of Hurricane Dorian's catastrophic damage to the Bahamas. 
https://abcn.ws/34FavPY", - "expandedLinks": [ - { - "original": "https://abcn.ws/34FavPY", - "expanded": "https://abcnews.go.com/International/images-reveal-devastating-effects-hurricane-dorian-bahamas/story?id=65430705", - }, - { - "original": "https://www.facebook.com/ABCNews/videos/813554229059535/", - "expanded": "https://www.facebook.com/ABCNews/videos/813554229059535/", - }, - ], - "link": "https://www.facebook.com/ABCNews/videos/813554229059535/", - "postUrl": "https://www.facebook.com/ABCNews/posts/10158783168978812", - "subscriberCount": 14195962, - "score": 6.30638852672751, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/70819311_877173089317895_5183022024343158784_n.mp4?_nc_cat=106&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQle7QTGkM8MnwJnEwTCnpIZwxY3Ruf6nhgWjKGydds6rYEyE7prcVjih77vpSsTMwk&_nc_ht=video.xx&oh=26d059466ad9324be438eca5971408ef&oe=5D75A7BF", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67596431_813554819059476_4503891397383815168_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQl4gV72rTeimEX1BaVB5cz-y64moZRDErM1FnmHeyVS1eCNtOZvSyLCgfFH-isp_fI&_nc_ht=scontent.xx&oh=88bd30dabcf261b63a9f9a7b06ed1e58&oe=5E0E1C73", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/67596431_813554819059476_4503891397383815168_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQl4gV72rTeimEX1BaVB5cz-y64moZRDErM1FnmHeyVS1eCNtOZvSyLCgfFH-isp_fI&_nc_ht=scontent.xx&oh=86a02d83d45ba512d2b117ac49cc277a&oe=5DF45C28", - }, - ], - "statistics": { - "actual": { - "likeCount": 2654, - "shareCount": 871, - "commentCount": 300, - "loveCount": 880, - "wowCount": 107, - "hahaCount": 1, - "sadCount": 23, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 247, - "shareCount": 212, - "commentCount": 103, - "loveCount": 17, - "wowCount": 104, - "hahaCount": 8, - "sadCount": 72, - "angryCount": 4, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - "videoLengthMS": 38761, - }, - { - "id": 70170110679, - "platformId": "809836039361670_970006750011264", - "platform": "Facebook", - "date": "2019-09-07 22:55:45", - "updated": "2019-09-08 00:27:14", - "type": "photo", - "message": "Divided government, North Dakota style. 
Congratulations Governor Doug Burgum on the Bison win!", - "expandedLinks": [ - { - "original": "https://www.facebook.com/RepArmstrongND/photos/a.811676579177616/970004493344823/?type=3", - "expanded": "https://www.facebook.com/RepArmstrongND/photos/a.811676579177616/970004493344823/?type=3", - } - ], - "link": "https://www.facebook.com/RepArmstrongND/photos/a.811676579177616/970004493344823/?type=3", - "postUrl": "https://www.facebook.com/RepArmstrongND/posts/970006750011264", - "subscriberCount": 2971, - "score": 6.275862068965517, - "media": [ - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69756298_970004496678156_6907594845594845184_n.jpg?_nc_cat=102&_nc_oc=AQkJ7Ki9PssoDAIvRWj_h-45qslAaM7D9V8I9ivdG17oVKUlDRM6Z94TXRMxFKHZNHM&_nc_ht=scontent.xx&oh=ee7139bf36eb3b3270a2cc6cf27cc1f6&oe=5DFA7146", - "height": 720, - "width": 540, - "full": "https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69756298_970004496678156_6907594845594845184_n.jpg?_nc_cat=102&_nc_oc=AQkJ7Ki9PssoDAIvRWj_h-45qslAaM7D9V8I9ivdG17oVKUlDRM6Z94TXRMxFKHZNHM&_nc_ht=scontent.xx&oh=ee7139bf36eb3b3270a2cc6cf27cc1f6&oe=5DFA7146", - } - ], - "statistics": { - "actual": { - "likeCount": 149, - "shareCount": 3, - "commentCount": 9, - "loveCount": 10, - "wowCount": 0, - "hahaCount": 11, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 15, - "shareCount": 2, - "commentCount": 3, - "loveCount": 2, - "wowCount": 4, - "hahaCount": 1, - "sadCount": 0, - "angryCount": 2, - "thankfulCount": 0, - }, - }, - "account": { - "id": 5599817, - "name": "Congressman Kelly Armstrong", - "handle": "RepArmstrongND", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49948250_810468759298398_5015266941133127680_n.jpg?_nc_cat=108&_nc_oc=AQnxIy4F6VXgzgmGwJSNyp_LXl4eUeIRidDRPiwritTvefyha72Fn3uQZ5Ig3w8CUgE&_nc_ht=scontent.xx&oh=1a6d324e012fcd21703991138eef4fd0&oe=5DFFD242", - "subscriberCount": 2985, - "url": "https://www.facebook.com/809836039361670", - "platform": "Facebook", - "platformId": "809836039361670", - "verified": True, - }, - }, - { - "id": 70174684045, - "platformId": "86680728811_10158783960173812", - "platform": "Facebook", - "date": "2019-09-07 23:56:22", - "updated": "2019-09-08 00:45:25", - "type": "link", - "title": "Trump calls off secret meeting with Taliban, Afghan leaders", - "caption": "abcnews.go.com", - "description": " ", - "message": "Pres. 
Donald Trump calls off a secret Camp David meeting with Taliban and Afghanistan leaders.", - "expandedLinks": [ - { - "original": "https://abcn.ws/2LxGetr", - "expanded": "https://abcnews.go.com/Politics/wireStory/trump-calls-off-secret-meeting-taliban-afghan-leaders-65458544?cid=social_fb_abcn", - } - ], - "link": "https://abcn.ws/2LxGetr", - "postUrl": "https://www.facebook.com/ABCNews/posts/10158783960173812", - "subscriberCount": 14195962, - "score": 6.206896551724138, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDQCdBbCnf161B-&w=720&h=720&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3316-1217482569dd6b4ec4429254b58a2a06.jpg&cfs=1&_nc_hash=AQBWsanQJrG_OzeB", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCdbu41auqfYy4v&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3316-1217482569dd6b4ec4429254b58a2a06.jpg&_nc_hash=AQDv6mnngddET3gP", - } - ], - "statistics": { - "actual": { - "likeCount": 237, - "shareCount": 199, - "commentCount": 529, - "loveCount": 12, - "wowCount": 99, - "hahaCount": 477, - "sadCount": 11, - "angryCount": 56, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 106, - "shareCount": 49, - "commentCount": 46, - "loveCount": 13, - "wowCount": 14, - "hahaCount": 12, - "sadCount": 13, - "angryCount": 8, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - }, - { - "id": 70170406994, - "platformId": "182919686769_10156515199261770", - "platform": "Facebook", - "date": "2019-09-07 23:00:15", - "updated": "2019-09-08 00:40:44", - "type": "link", - "title": "Trump Tweets Praise From Border Patrol Union President After Mexico Announces Massive Drop In Migrant Arrivals", - "caption": "dailycaller.com", - "description": 'Donald Trump tweeted a quote Saturday morning from National Border Patrol Council president Brandon Judd lauding Mexico as a "true Border Security Partner."', - "message": "Yuge!", - "expandedLinks": [ - { - "original": "https://dailycaller.com/2019/09/07/trump-mexico-drop-migrant-border/", - "expanded": "https://dailycaller.com/2019/09/07/trump-mexico-drop-migrant-border/", - } - ], - "link": "https://dailycaller.com/2019/09/07/trump-mexico-drop-migrant-border/", - "postUrl": "https://www.facebook.com/DailyCaller/posts/10156515199261770", - "subscriberCount": 5408428, - "score": 6.142118863049095, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDv-0rr39_tiNQG&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dd29c6f0a001ba5b1f43%2Fd6b21b289a461b60f8abcdf7f7f4df99ce425025_df2ee53b3e8f0b1976783e6fc45fe7ddf70c493d_facebook&cfs=1&_nc_hash=AQAr645fhuWZwd4v", - "height": 720, - "width": 720, - "full": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQA4QIFRa_STKnzo&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dd29c6f0a001ba5b1f43%2Fd6b21b289a461b60f8abcdf7f7f4df99ce425025_df2ee53b3e8f0b1976783e6fc45fe7ddf70c493d_facebook&_nc_hash=AQCXE7dGq6zEOF_J", - } - ], - "statistics": { - "actual": { - "likeCount": 1868, - "shareCount": 245, - "commentCount": 92, - "loveCount": 161, - "wowCount": 4, - "hahaCount": 4, - "sadCount": 1, - "angryCount": 2, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 112, - "shareCount": 54, - "commentCount": 108, - "loveCount": 8, - "wowCount": 15, - "hahaCount": 47, - "sadCount": 7, - "angryCount": 36, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13489, - "name": "The Daily Caller", - "handle": "DailyCaller", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471", - "subscriberCount": 5408115, - "url": "https://www.facebook.com/182919686769", - "platform": "Facebook", - "platformId": "182919686769", - "verified": True, - }, - }, - { - "id": 70168125843, - "platformId": "86680728811_10158783746343812", - "platform": "Facebook", - "date": "2019-09-07 22:24:58", - "updated": "2019-09-08 00:45:25", - "type": "link", - "title": "Canadian Bianca Andreescu beats Serena Williams to win the U.S. Open", - "caption": "abcnews.go.com", - "description": " ", - "message": "Canadian Bianca Andreescu beat U.S. tennis star Serena Williams in straight sets to win the U.S. Open womens' finals on Saturday.", - "expandedLinks": [ - { - "original": "https://abcn.ws/2LsasxS", - "expanded": "https://abcnews.go.com/GMA/Culture/canadian-bianca-andreescu-beats-serena-williams-win-us/story?id=64291644&cid=social_fb_abcn", - } - ], - "link": "https://abcn.ws/2LsasxS", - "postUrl": "https://www.facebook.com/ABCNews/posts/10158783746343812", - "subscriberCount": 14195962, - "score": 6.134556574923548, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCSeL5RfwH3NRrE&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fserena-04-as-epa-190908_hpMain_16x9_992.jpg&cfs=1&sx=334&sy=0&sw=558&sh=558&_nc_hash=AQC2o8CMuGiSX_Ji", - "height": 558, - "width": 558, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDhKzi-HhJI-7m8&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fserena-04-as-epa-190908_hpMain_16x9_992.jpg&_nc_hash=AQDwy9HVRXiDJ_EN", - } - ], - "statistics": { - "actual": { - "likeCount": 868, - "shareCount": 213, - "commentCount": 244, - "loveCount": 114, - "wowCount": 229, - "hahaCount": 58, - "sadCount": 277, - "angryCount": 3, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 133, - "shareCount": 63, - "commentCount": 57, - "loveCount": 16, - "wowCount": 17, - "hahaCount": 15, - "sadCount": 16, - "angryCount": 10, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - }, - { - "id": 
70161254809, - "platformId": "123624513983_10157817449458984", - "platform": "Facebook", - "date": "2019-09-07 20:15:00", - "updated": "2019-09-08 00:38:24", - "type": "photo", - "expandedLinks": [ - { - "original": "https://www.facebook.com/WesternJournal/photos/a.10150384918003984/10157817449183984/?type=3", - "expanded": "https://www.facebook.com/WesternJournal/photos/a.10150384918003984/10157817449183984/?type=3", - } - ], - "link": "https://www.facebook.com/WesternJournal/photos/a.10150384918003984/10157817449183984/?type=3", - "postUrl": "https://www.facebook.com/WesternJournal/posts/10157817449458984", - "subscriberCount": 5185113, - "score": 6.1133023975251355, - "media": [ - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69919332_10157817449193984_4486464700724281344_n.jpg?_nc_cat=111&_nc_oc=AQliMBxdazxx-1thEwmLC_H1WPeTy2OADXEU6yi2-3sizvlCtytAR-mlCvsY6YrfGhc&_nc_ht=scontent.xx&oh=94ddf879177de28b11fbd50ea2839715&oe=5E01E4FD", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69919332_10157817449193984_4486464700724281344_n.jpg?_nc_cat=111&_nc_oc=AQliMBxdazxx-1thEwmLC_H1WPeTy2OADXEU6yi2-3sizvlCtytAR-mlCvsY6YrfGhc&_nc_ht=scontent.xx&oh=94ddf879177de28b11fbd50ea2839715&oe=5E01E4FD", - } - ], - "statistics": { - "actual": { - "likeCount": 18299, - "shareCount": 10102, - "commentCount": 968, - "loveCount": 2038, - "wowCount": 43, - "hahaCount": 134, - "sadCount": 12, - "angryCount": 22, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 3703, - "shareCount": 902, - "commentCount": 270, - "loveCount": 183, - "wowCount": 19, - "hahaCount": 47, - "sadCount": 16, - "angryCount": 32, - "thankfulCount": 0, - }, - }, - "account": { - "id": 93420, - "name": "The Western Journal", - "handle": "WesternJournal", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49664345_10157205261148984_1256195277434388480_n.png?_nc_cat=1&_nc_oc=AQkUo1GJrlGqxXcfjFxGkOcXookw_tgn8qATXCSI0ICt6sibuBdTtyIuuWj9iPLw5ZM&_nc_ht=scontent.xx&oh=bf010f921f678fbb0032a465900b5f24&oe=5DF8F16D", - "subscriberCount": 5184899, - "url": "https://www.facebook.com/123624513983", - "platform": "Facebook", - "platformId": "123624513983", - "verified": True, - }, - }, - { - "id": 70160858713, - "platformId": "114945745226947_2335472573174242", - "platform": "Facebook", - "date": "2019-09-07 20:00:51", - "updated": "2019-09-08 00:40:02", - "type": "link", - "title": "Celebrity cruise ship reroutes in Bahamas to deliver food, aid after Dorian", - "caption": "nbcnews.com", - "description": " ", - "message": "The Celebrity Equinox's kitchen staff is preparing 10,000 meals with guests even pitching in to help in the aftermath of the hurricane.", - "expandedLinks": [ - { - "original": "https://on.msnbc.com/2LxhrFT", - "expanded": "https://www.nbcnews.com/news/world/after-hurricane-dorian-celebrity-cruise-ship-bahamas-reroutes-deliver-food-n1050796?cid=sm_npd_ms_fb_lw", - } - ], - "link": "https://on.msnbc.com/2LxhrFT", - "postUrl": "https://www.facebook.com/thelastword/posts/2335472573174242", - "subscriberCount": 515865, - "score": 6.0418604651162795, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCl0TaY2DIEK806&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2999941%2F190906-celebrity-equinox-hurricane-relief-al-1053jpg_692e60a6dbb531606db72afafb2b2440.nbcnews-fp-1200-630.jpg&cfs=1&sx=570&sy=0&sw=630&sh=630&_nc_hash=AQAItXWulq2vZygr", - "height": 630, - "width": 630, 
- "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBNaHxGrar-t0VK&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2999941%2F190906-celebrity-equinox-hurricane-relief-al-1053jpg_692e60a6dbb531606db72afafb2b2440.nbcnews-fp-1200-630.jpg&_nc_hash=AQDJ5XFXZ3aZb6GG", - } - ], - "statistics": { - "actual": { - "likeCount": 768, - "shareCount": 234, - "commentCount": 26, - "loveCount": 266, - "wowCount": 3, - "hahaCount": 1, - "sadCount": 1, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 43, - "shareCount": 34, - "commentCount": 42, - "loveCount": 8, - "wowCount": 11, - "hahaCount": 8, - "sadCount": 13, - "angryCount": 56, - "thankfulCount": 0, - }, - }, - "account": { - "id": 4004, - "name": "The Last Word With Lawrence O'Donnell", - "handle": "thelastword", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16114622_1184240434964134_5160717321521180833_n.png?_nc_cat=1&_nc_oc=AQkE59Us5gvqt0N90qJZW6XSCRHGK5YGgwcB-G1YctCjO7mmMEWXfrnnaX-jZYV633o&_nc_ht=scontent.xx&oh=eaa0c18d2823fe813960f06f60585643&oe=5E08F8C6", - "subscriberCount": 515865, - "url": "https://www.facebook.com/114945745226947", - "platform": "Facebook", - "platformId": "114945745226947", - "verified": True, - }, - }, - { - "id": 70161348039, - "platformId": "10513336322_10158174166611323", - "platform": "Facebook", - "date": "2019-09-07 20:15:07", - "updated": "2019-09-08 00:42:26", - "type": "link", - "title": "Tories extend lead over Labour to 10% despite chaotic week", - "caption": "theguardian.com", - "description": "More than half of all leave voters are now planning to vote for Boris Johnson", - "expandedLinks": [ - { - "original": "https://www.theguardian.com/politics/2019/sep/07/tories-extend-lead-over-labour-to-10-despite-chaotic-week?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567874494", - "expanded": "https://www.theguardian.com/politics/2019/sep/07/tories-extend-lead-over-labour-to-10-despite-chaotic-week?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567874494", - } - ], - "link": "https://www.theguardian.com/politics/2019/sep/07/tories-extend-lead-over-labour-to-10-despite-chaotic-week?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567874494", - "postUrl": "https://www.facebook.com/theguardian/posts/10158174166611323", - "subscriberCount": 8186083, - "score": 6.033670033670034, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBHdBpCuwOKnssn&w=630&h=630&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F90f73b5d84cf6cd9995aa9f3b96b7f35d81327fa%2F0_63_4000_2400%2Fmaster%2F4000.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3D5d518132e7ce9c21438063fc255d64c9&cfs=1&sx=423&sy=0&sw=630&sh=630&_nc_hash=AQA2aANjQU1hmNc7", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAUfzEgAwSgTSmh&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F90f73b5d84cf6cd9995aa9f3b96b7f35d81327fa%2F0_63_4000_2400%2Fmaster%2F4000.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3D5d518132e7ce9c21438063fc255d64c9&_nc_hash=AQDlZVsz9TV4cIMo", - } - ], - "statistics": { - "actual": { - "likeCount": 281, - "shareCount": 
206, - "commentCount": 582, - "loveCount": 24, - "wowCount": 122, - "hahaCount": 225, - "sadCount": 230, - "angryCount": 122, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 123, - "shareCount": 46, - "commentCount": 60, - "loveCount": 11, - "wowCount": 9, - "hahaCount": 26, - "sadCount": 7, - "angryCount": 15, - "thankfulCount": 0, - }, - }, - "account": { - "id": 5740, - "name": "The Guardian", - "handle": "theguardian", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/46160148_10157340584076323_3990431626264838144_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkKD6tb0oraHl_Qq9dA1S51ktyWhE9lPo7udOrFCRkfCctJldfDrwPVn7PcSDSY5Sc&_nc_ht=scontent.xx&oh=8c51a127f7d06b002a6fcba57abe5181&oe=5DFDE22E", - "subscriberCount": 8186263, - "url": "https://www.facebook.com/10513336322", - "platform": "Facebook", - "platformId": "10513336322", - "verified": True, - }, - }, - { - "id": 70162846697, - "platformId": "86680728811_10158783468813812", - "platform": "Facebook", - "date": "2019-09-07 20:48:11", - "updated": "2019-09-08 00:45:25", - "type": "link", - "title": "Duchess Meghan supports pal Serena Williams at US Open final", - "caption": "abcnews.go.com", - "description": " ", - "message": "Serena Williams' close friend Meghan, the Duchess of Sussex, took a last-minute flight from London to New York to watch Williams play against 19-year-old Canadian Bianca Andreescu.", - "expandedLinks": [ - { - "original": "https://abcn.ws/2Lx2p2T", - "expanded": "https://abcnews.go.com/GMA/Culture/duchess-meghan-supports-pal-serena-williams-us-open/story?id=65428970&cid=social_fb_abcn&fbclid=IwAR0nlM2hr7NUPWnuk_WV1XcUS_IYX3FiPCRl2WJ2RzFS2htHqdSUywFN9no", - } - ], - "link": "https://abcn.ws/2Lx2p2T", - "postUrl": "https://www.facebook.com/ABCNews/posts/10158783468813812", - "subscriberCount": 14195962, - "score": 6.016528925619835, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBa1SjmHvfScQgZ&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fmeghan-markle-01-as-usa-190908_hpMain_16x9_992.jpg&cfs=1&sx=12&sy=0&sw=558&sh=558&_nc_hash=AQBCA2r4xAUn3lnA", - "height": 558, - "width": 558, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQApcYviDYU_iBlu&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fmeghan-markle-01-as-usa-190908_hpMain_16x9_992.jpg&_nc_hash=AQCbSSXvjqOh_moq", - } - ], - "statistics": { - "actual": { - "likeCount": 1618, - "shareCount": 76, - "commentCount": 160, - "loveCount": 269, - "wowCount": 8, - "hahaCount": 32, - "sadCount": 3, - "angryCount": 18, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 148, - "shareCount": 71, - "commentCount": 63, - "loveCount": 17, - "wowCount": 19, - "hahaCount": 17, - "sadCount": 17, - "angryCount": 11, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - }, - { - "id": 70163621987, - "platformId": "908009612563863_2986539611377509", - "platform": "Facebook", - "date": "2019-09-07 21:00:24", - "updated": "2019-09-08 00:48:09", - "type": "native_video", - "message": "Watch President Trump try 
to convince his supporters that he's 'competent'", - "expandedLinks": [ - { - "original": "https://www.facebook.com/NowThisPolitics/videos/733410037104996/", - "expanded": "https://www.facebook.com/NowThisPolitics/videos/733410037104996/", - } - ], - "link": "https://www.facebook.com/NowThisPolitics/videos/733410037104996/", - "postUrl": "https://www.facebook.com/NowThisPolitics/posts/2986539611377509", - "subscriberCount": 6074083, - "score": 6.015006821282401, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/41228504_471356220011696_2218706403474800640_n.mp4?_nc_cat=105&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQnv4wN5MIFBZEQnQlm8UqthX3urGg9G4rxVLRSLXw5PTzatvEX8YB2-kbDQNu597IA&_nc_ht=video.xx&oh=74fb95761cacc344c6a6ea6430e8e10e&oe=5D76A240", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/38959840_531215240657811_1501424857630375936_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQnpQThDDK9CszIVBfogQrEoXnsBquG5sVegZ5s03mA9VBnPSa1eQmmcSBz90QoX8M4&_nc_ht=scontent.xx&oh=b886a06dd6bcb662132ed7bc4248cf19&oe=5E0D62F9", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/38959840_531215240657811_1501424857630375936_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQnpQThDDK9CszIVBfogQrEoXnsBquG5sVegZ5s03mA9VBnPSa1eQmmcSBz90QoX8M4&_nc_ht=scontent.xx&oh=c2d30b81ca63fc2e70d5b4d25405c419&oe=5E0DD6A2", - }, - ], - "statistics": { - "actual": { - "likeCount": 277, - "shareCount": 481, - "commentCount": 1522, - "loveCount": 27, - "wowCount": 60, - "hahaCount": 1583, - "sadCount": 75, - "angryCount": 384, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 266, - "shareCount": 218, - "commentCount": 111, - "loveCount": 29, - "wowCount": 22, - "hahaCount": 27, - "sadCount": 31, - "angryCount": 29, - "thankfulCount": 0, - }, - }, - "account": { - "id": 311636, - "name": "NowThis Politics", - "handle": "NowThisPolitics", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28276603_1939096412788506_2850422809072819205_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlBSULvu9xr5smvB3kmRub5MfL3SpyPxNX94GEyc5skmb19swOR40nthDv1Kip3kcw&_nc_ht=scontent.xx&oh=b734d3faa39291c805198e3ad7de3450&oe=5DFF0890", - "subscriberCount": 6074746, - "url": "https://www.facebook.com/908009612563863", - "platform": "Facebook", - "platformId": "908009612563863", - "verified": True, - }, - "videoLengthMS": 154281, - }, - { - "id": 70163173554, - "platformId": "21898300328_10159284952995329", - "platform": "Facebook", - "date": "2019-09-07 20:55:00", - "updated": "2019-09-08 00:35:29", - "type": "native_video", - "message": "I'm obsessed with marshmallows ✨", - "expandedLinks": [ - { - "original": "https://www.facebook.com/BuzzFeed/videos/2514328838787000/", - "expanded": "https://www.facebook.com/BuzzFeed/videos/2514328838787000/", - } - ], - "link": "https://www.facebook.com/BuzzFeed/videos/2514328838787000/", - "postUrl": "https://www.facebook.com/BuzzFeed/posts/10159284952995329", - "subscriberCount": 11870805, - "score": 5.962962962962963, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/70691299_728554827590593_3557958716256944128_n.mp4?_nc_cat=110&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQkMkRLolK3KyQb50CCZ1lJy-iI9Y7FbTL4UiJz6G2j0LVvsBHe1PKdJjT9804sPfys&_nc_ht=video.xx&oh=7827d58f3e48c091ca7eaf5a49bc280a&oe=5D769C33", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": 
"https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67695914_2514330655453485_3708524408340480000_n.jpg?_nc_cat=102&_nc_log=1&_nc_oc=AQkHho01TS09AnhvtVCMUZPEu6nBoy5iGEXYQFVC7uG9Eokr-hPZdB6b934Kdlo4wCw&_nc_ht=scontent.xx&oh=8b65fd6f342f1d51825e0647ce1d4e54&oe=5E0EACC5", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/67695914_2514330655453485_3708524408340480000_n.jpg?_nc_cat=102&_nc_log=1&_nc_oc=AQkHho01TS09AnhvtVCMUZPEu6nBoy5iGEXYQFVC7uG9Eokr-hPZdB6b934Kdlo4wCw&_nc_ht=scontent.xx&oh=d28bfa966e5df214266a2859f2264032&oe=5E086471", - }, - ], - "statistics": { - "actual": { - "likeCount": 638, - "shareCount": 113, - "commentCount": 235, - "loveCount": 282, - "wowCount": 0, - "hahaCount": 20, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 110, - "shareCount": 34, - "commentCount": 28, - "loveCount": 26, - "wowCount": 4, - "hahaCount": 8, - "sadCount": 4, - "angryCount": 2, - "thankfulCount": 0, - }, - }, - "account": { - "id": 5862, - "name": "BuzzFeed", - "handle": "BuzzFeed", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11222622_10153870407270329_4094729505669388790_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlaEUp906VUUmeEPgfBCNmaczf4owSg6ehvwRebY_UVmSGVjDB_IUr4WGPgzRnptXU&_nc_ht=scontent.xx&oh=96b0a01485175975acdaeb06feb9d222&oe=5E06A54B", - "subscriberCount": 11870650, - "url": "https://www.facebook.com/21898300328", - "platform": "Facebook", - "platformId": "21898300328", - "verified": True, - }, - "videoLengthMS": 184894, - }, - { - "id": 70159572807, - "platformId": "34407447433_10156144115942434", - "platform": "Facebook", - "date": "2019-09-07 19:43:00", - "updated": "2019-09-08 00:29:54", - "type": "link", - "title": "Victim Allowed to Testify Through the 2,500 Personalities She Invented to Survive Childhood Sexual Abuse", - "caption": "jezebel.com", - "description": "An Australian survivor of horrific childhood sexual, physical, and mental abuse might be the first person in the world to testify in court through the personalties she invented in order to survive the trauma.\r\n", - "message": '"Make no mistake, my dad caused my Multiple Personality Disorder," survivor Jeni Haynes told the court.', - "expandedLinks": [ - { - "original": "https://trib.al/hAZh3HX", - "expanded": "https://jezebel.com/victim-allowed-to-testify-through-the-2-500-personaliti-1837956427?rev=1567885072422&utm_medium=socialflow&utm_campaign=socialflow_jezebel_facebook&utm_source=jezebel_facebook&fbclid=IwAR37XfjKPTCo-byOlmC1hhdH-ka2zZmxQ24rDjMLXcGUhle1wMSkBviiZpE&fbclid=IwAR1C0S2pjDBEpenxMFVLXP0sm_iwVMG-zah8UZZ6SyRcMl9vD8_qbv029s0&fbclid=IwAR1o83O6e0Cw9V69W4gIhERu2UOcRJq-06qE8cqoMblOiZg0kgFwO4G8mK4", - } - ], - "link": "https://trib.al/hAZh3HX", - "postUrl": "https://www.facebook.com/Jezebel/posts/10156144115942434", - "subscriberCount": 815764, - "score": 5.844827586206897, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBEDDfWffmh6g5X&w=720&h=720&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--bxvolnQa--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fxqm0wmlfe2niquacansa.jpg&cfs=1&_nc_hash=AQBjyPpwDRCl8eHJ", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCrFKkF0CWTEaIs&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--bxvolnQa--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fxqm0wmlfe2niquacansa.jpg&_nc_hash=AQAZRCMo7DPfzgCP", - } - ], - 
"statistics": { - "actual": { - "likeCount": 108, - "shareCount": 37, - "commentCount": 15, - "loveCount": 6, - "wowCount": 50, - "hahaCount": 0, - "sadCount": 120, - "angryCount": 3, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 14, - "shareCount": 6, - "commentCount": 7, - "loveCount": 7, - "wowCount": 4, - "hahaCount": 5, - "sadCount": 3, - "angryCount": 12, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6753, - "name": "Jezebel", - "handle": "Jezebel", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10632833_10152652146387434_9205889665163163075_n.png?_nc_cat=1&_nc_oc=AQmE3moAD_e4DDH0Qk-IkkzGJ36IXDD-O29mmUauemxpi5JLbY-oMjPMeCglmwSb0Rs&_nc_ht=scontent.xx&oh=d7afae2d39ef36c76291f53c416d6c76&oe=5E0F900A", - "subscriberCount": 815764, - "url": "https://www.facebook.com/34407447433", - "platform": "Facebook", - "platformId": "34407447433", - "verified": True, - }, - }, - { - "id": 70163771159, - "platformId": "32109457015_10156178850177016", - "platform": "Facebook", - "date": "2019-09-07 21:02:21", - "updated": "2019-09-08 00:23:22", - "type": "link", - "title": "Ahead of Standing Ovation at New Hampshire Democratic Convention, Sanders Camp Announces Endorsements From 53 State Dems", - "caption": "commondreams.org", - "description": '"This latest round of endorsements shows we are not only retaining and engaging supporters from 2016, but building new support from a broad swath of leaders from around the state."', - "message": "A big few days for the Vermont senator.", - "expandedLinks": [ - { - "original": "https://www.commondreams.org/news/2019/09/07/ahead-standing-ovation-new-hampshire-democratic-convention-sanders-camp-announces-0", - "expanded": "https://www.commondreams.org/news/2019/09/07/ahead-standing-ovation-new-hampshire-democratic-convention-sanders-camp-announces-0", - } - ], - "link": "https://www.commondreams.org/news/2019/09/07/ahead-standing-ovation-new-hampshire-democratic-convention-sanders-camp-announces-0", - "postUrl": "https://www.facebook.com/commondreams.org/posts/10156178850177016", - "subscriberCount": 366669, - "score": 5.829059829059829, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDKBlgliFGvmLnF&w=500&h=500&url=https%3A%2F%2Fwww.commondreams.org%2Fsites%2Fdefault%2Ffiles%2Fheadline%2Fthumbs%2Fsanders_0.png&cfs=1&sx=347&sy=0&sw=500&sh=500&_nc_hash=AQDwJC-1_y7Jjb-K", - "height": 500, - "width": 500, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDw3ov_HaM2oZ-I&url=https%3A%2F%2Fwww.commondreams.org%2Fsites%2Fdefault%2Ffiles%2Fheadline%2Fthumbs%2Fsanders_0.png&_nc_hash=AQA9h2rZgQp5Bf9N", - } - ], - "statistics": { - "actual": { - "likeCount": 365, - "shareCount": 143, - "commentCount": 22, - "loveCount": 149, - "wowCount": 0, - "hahaCount": 3, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 32, - "shareCount": 30, - "commentCount": 8, - "loveCount": 7, - "wowCount": 4, - "hahaCount": 3, - "sadCount": 9, - "angryCount": 24, - "thankfulCount": 0, - }, - }, - "account": { - "id": 9840, - "name": "CommonDreams", - "handle": "commondreams.org", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10469767_10152172973972016_8063428021861554001_n.jpg?_nc_cat=103&_nc_oc=AQlnGcnqptQQZZC2ssw_mVUJY3OL5CzMA_2hp5GQtIg_0HMCwMmn9q28KrUoNRmgbtU&_nc_ht=scontent.xx&oh=71d9a7a103ab2f477e27840eb92ac030&oe=5E040D0C", - "subscriberCount": 366664, - "url": "https://www.facebook.com/32109457015", - "platform": "Facebook", - 
"platformId": "32109457015", - "verified": True, - }, - }, - { - "id": 70158517493, - "platformId": "10513336322_10158174007266323", - "platform": "Facebook", - "date": "2019-09-07 19:11:30", - "updated": "2019-09-08 00:42:26", - "type": "link", - "title": "'I refuse to die in here': the marine who survived two tours and is now fighting deportation", - "caption": "theguardian.com", - "description": "In his 21 months of detention, Jose Segovia Benitez says he’s been denied critical treatment for his PTSD and heart condition", - "message": '"I try to hold onto any kind of strength I have, but I’m not doing well mentally, emotionally, physically.”', - "expandedLinks": [ - { - "original": "https://www.theguardian.com/us-news/2019/sep/06/i-refuse-to-die-in-here-the-marine-who-survived-two-tours-and-is-now-fighting-deportation?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567867472", - "expanded": "https://www.theguardian.com/us-news/2019/sep/06/i-refuse-to-die-in-here-the-marine-who-survived-two-tours-and-is-now-fighting-deportation?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567867472", - } - ], - "link": "https://www.theguardian.com/us-news/2019/sep/06/i-refuse-to-die-in-here-the-marine-who-survived-two-tours-and-is-now-fighting-deportation?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567867472", - "postUrl": "https://www.facebook.com/theguardian/posts/10158174007266323", - "subscriberCount": 8186083, - "score": 5.7974683544303796, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQC9TU3AzHWjm7pP&w=630&h=630&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F42943b55dd663362b3a2eebf62397e83e7e6931e%2F0_215_3200_1920%2Fmaster%2F3200.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3Df38d9b9d9f1c0a1c34aabd72cc8836ae&cfs=1&sx=90&sy=0&sw=630&sh=630&_nc_hash=AQB0G9pHPr3Yu-eW", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCgxJMwqaqfvKmo&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F42943b55dd663362b3a2eebf62397e83e7e6931e%2F0_215_3200_1920%2Fmaster%2F3200.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3Df38d9b9d9f1c0a1c34aabd72cc8836ae&_nc_hash=AQA3ZYBOq8NMQ35O", - } - ], - "statistics": { - "actual": { - "likeCount": 217, - "shareCount": 456, - "commentCount": 114, - "loveCount": 6, - "wowCount": 38, - "hahaCount": 5, - "sadCount": 420, - "angryCount": 576, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 133, - "shareCount": 50, - "commentCount": 63, - "loveCount": 12, - "wowCount": 9, - "hahaCount": 27, - "sadCount": 7, - "angryCount": 15, - "thankfulCount": 0, - }, - }, - "account": { - "id": 5740, - "name": "The Guardian", - "handle": "theguardian", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/46160148_10157340584076323_3990431626264838144_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkKD6tb0oraHl_Qq9dA1S51ktyWhE9lPo7udOrFCRkfCctJldfDrwPVn7PcSDSY5Sc&_nc_ht=scontent.xx&oh=8c51a127f7d06b002a6fcba57abe5181&oe=5DFDE22E", - "subscriberCount": 8186263, - "url": "https://www.facebook.com/10513336322", - "platform": "Facebook", - "platformId": "10513336322", - "verified": True, - }, - }, - { - "id": 70170343869, - 
"platformId": "123624513983_10157820072718984", - "platform": "Facebook", - "date": "2019-09-07 23:00:02", - "updated": "2019-09-08 00:38:24", - "type": "link", - "title": "Age 8 Boy Fights Off Mountain Lion After It Bit Him in Head and Dragged His Body Down Hill", - "caption": "westernjournal.com", - "description": "Pike Carlson, 8, fought back against a mountain lion who attacked and bit him on the face while he was playing outside in his backyard.", - "message": "The little boy knew he had to fight back when the 65-pound mountain lion began to attack. 'I was just punching, trying to grab anything that I can, like a stick,' he said. 'I did find a stick and I tried to get it in the eye but soon the stick snapped.'", - "expandedLinks": [ - { - "original": "https://www.westernjournal.com/age-8-boy-fights-off-mountain-lion-bit-head-dragged-body-hill/?utm_source=facebook&utm_medium=westernjournalism&utm_content=2019-09-07&utm_campaign=manualpost", - "expanded": "https://www.westernjournal.com/age-8-boy-fights-off-mountain-lion-bit-head-dragged-body-hill/?utm_source=facebook&utm_medium=westernjournalism&utm_content=2019-09-07&utm_campaign=manualpost", - } - ], - "link": "https://www.westernjournal.com/age-8-boy-fights-off-mountain-lion-bit-head-dragged-body-hill/?utm_source=facebook&utm_medium=westernjournalism&utm_content=2019-09-07&utm_campaign=manualpost", - "postUrl": "https://www.facebook.com/WesternJournal/posts/10157820072718984", - "subscriberCount": 5185113, - "score": 5.781818181818182, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQC3rSRZQb_RMo2s&w=720&h=720&url=https%3A%2F%2Fwww.westernjournal.com%2Fwp-content%2Fuploads%2F2019%2F09%2FUntitled-design-7.jpg&cfs=1&_nc_hash=AQCFUuE0zmdVLXCn", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAtGTuiuTsPrguo&url=https%3A%2F%2Fwww.westernjournal.com%2Fwp-content%2Fuploads%2F2019%2F09%2FUntitled-design-7.jpg&_nc_hash=AQBrZ83xlVP9Nzno", - } - ], - "statistics": { - "actual": { - "likeCount": 620, - "shareCount": 229, - "commentCount": 196, - "loveCount": 53, - "wowCount": 333, - "hahaCount": 7, - "sadCount": 151, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 84, - "shareCount": 49, - "commentCount": 55, - "loveCount": 8, - "wowCount": 13, - "hahaCount": 18, - "sadCount": 8, - "angryCount": 40, - "thankfulCount": 0, - }, - }, - "account": { - "id": 93420, - "name": "The Western Journal", - "handle": "WesternJournal", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49664345_10157205261148984_1256195277434388480_n.png?_nc_cat=1&_nc_oc=AQkUo1GJrlGqxXcfjFxGkOcXookw_tgn8qATXCSI0ICt6sibuBdTtyIuuWj9iPLw5ZM&_nc_ht=scontent.xx&oh=bf010f921f678fbb0032a465900b5f24&oe=5DF8F16D", - "subscriberCount": 5184899, - "url": "https://www.facebook.com/123624513983", - "platform": "Facebook", - "platformId": "123624513983", - "verified": True, - }, - }, - { - "id": 70160309952, - "platformId": "338028696036_10158536072151037", - "platform": "Facebook", - "date": "2019-09-07 20:00:01", - "updated": "2019-09-08 00:27:38", - "type": "link", - "title": "Bernie Sanders joins LeBron James in support of bill allowing college athletes to be paid", - "caption": "sports.yahoo.com", - "description": "A bill awaiting vote in the California state assembly could trigger an upheaval in NCAA athletics.", - "message": '"College athletes are workers. Pay them. 
"', - "expandedLinks": [ - { - "original": "https://news.yahoo.com/lebron-james-bernie-sanders-california-ncaa-034858749.html", - "expanded": "https://news.yahoo.com/lebron-james-bernie-sanders-california-ncaa-034858749.html", - } - ], - "link": "https://news.yahoo.com/lebron-james-bernie-sanders-california-ncaa-034858749.html", - "postUrl": "https://www.facebook.com/yahoonews/posts/10158536072151037", - "subscriberCount": 7866135, - "score": 5.771844660194175, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQChKNVaz3GsOVDZ&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73e65dca9a410594352803%2F72d18ae212954572d6739405c59d6cc2d7369ee6_96a348701633291e228c01c4c0f51215419bd124_facebook&cfs=1&_nc_hash=AQDTVopnB73UtOAk", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDggKqkAAIXLAVB&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73e65dca9a410594352803%2F72d18ae212954572d6739405c59d6cc2d7369ee6_96a348701633291e228c01c4c0f51215419bd124_facebook&_nc_hash=AQAmylurbIXZ1Tbw", - } - ], - "statistics": { - "actual": { - "likeCount": 627, - "shareCount": 146, - "commentCount": 233, - "loveCount": 70, - "wowCount": 5, - "hahaCount": 77, - "sadCount": 1, - "angryCount": 30, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 55, - "shareCount": 35, - "commentCount": 50, - "loveCount": 9, - "wowCount": 9, - "hahaCount": 24, - "sadCount": 9, - "angryCount": 15, - "thankfulCount": 0, - }, - }, - "account": { - "id": 16337, - "name": "Yahoo News", - "handle": "yahoonews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1234558_10151822723996037_1232781499_n.jpg?_nc_cat=1&_nc_oc=AQkPmfbCJFc9Ll_w6v-FBqGGulHvLsK6m9J20HAPS45YGyFGlkUw6ZZKS6yuaKxI_V0&_nc_ht=scontent.xx&oh=e2ffaa2bbb04dd746da7d26542134656&oe=5DFF9BED", - "subscriberCount": 7865795, - "url": "https://www.facebook.com/338028696036", - "platform": "Facebook", - "platformId": "338028696036", - "verified": True, - }, - }, - { - "id": 70160979151, - "platformId": "155869377766434_3572675879419083", - "platform": "Facebook", - "date": "2019-09-07 20:02:05", - "updated": "2019-09-08 00:20:11", - "type": "link", - "title": "Thousands listed as missing in Bahamas in Hurricane Dorian’s wake", - "caption": "nbcnews.com", - "description": " ", - "message": "Thousands of people are desperately trying to find loved ones in the Bahamas.", - "expandedLinks": [ - { - "original": "https://nbcnews.to/2Lvx4h8", - "expanded": "https://www.nbcnews.com/news/world/thousands-listed-missing-bahamas-hurricane-dorian-s-wake-n1050791?cid=sm_npd_nn_fb_ma", - } - ], - "link": "https://nbcnews.to/2Lvx4h8", - "postUrl": "https://www.facebook.com/NBCNews/posts/3572675879419083", - "subscriberCount": 9970622, - "score": 5.688976377952756, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQD6stbq-rFP7Jda&w=720&h=720&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&cfs=1&_nc_hash=AQBVY5Go-4zF-tlS", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCocMqpL-yoqFsO&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&_nc_hash=AQDssZadqERvIDEf", - } - ], - "statistics": { - "actual": { - "likeCount": 75, - 
"shareCount": 375, - "commentCount": 59, - "loveCount": 1, - "wowCount": 43, - "hahaCount": 1, - "sadCount": 888, - "angryCount": 3, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 61, - "shareCount": 50, - "commentCount": 54, - "loveCount": 10, - "wowCount": 19, - "hahaCount": 19, - "sadCount": 21, - "angryCount": 20, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13889, - "name": "NBC News", - "handle": "NBCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653", - "subscriberCount": 9970540, - "url": "https://www.facebook.com/155869377766434", - "platform": "Facebook", - "platformId": "155869377766434", - "verified": True, - }, - }, - { - "id": 70170621365, - "platformId": "10498053716_10156935398183717", - "platform": "Facebook", - "date": "2019-09-07 23:00:00", - "updated": "2019-09-08 00:44:10", - "type": "link", - "title": "FAIL: Toronto Chick-fil-A Protesters Ignored as They Stage 'Die-In' Demonstration", - "caption": "mrctv.org", - "description": "You know the best thing about the\xa0grand opening of a Chick-fil-A?\xa0", - "message": 'VIDEO: Protesters held a "die-in" in front of Toronto\'s first Chick-fil-A. Patrons essentially ignored them by simply walking over or around them to get their food.', - "expandedLinks": [ - { - "original": "https://www.mrctv.org/blog/toronto-chick-fil-protesters-ignored-they-stage-die-demonstration", - "expanded": "https://www.mrctv.org/blog/toronto-chick-fil-protesters-ignored-they-stage-die-demonstration", - } - ], - "link": "https://www.mrctv.org/blog/toronto-chick-fil-protesters-ignored-they-stage-die-demonstration", - "postUrl": "https://www.facebook.com/mediaresearchcenter/posts/10156935398183717", - "subscriberCount": 1853276, - "score": 5.529411764705882, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCo1ARLaetR4aJ5&w=720&h=720&url=https%3A%2F%2Fcdn.mrctv.org%2Ffiles%2Fstyles%2Fmedium%2Fs3%2F2019-09%2Flooney%2520bags.png%3Fh%3D7d1f3709&cfs=1&_nc_hash=AQD7e20NjdW5a_7I", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCIixan6kLeR8U_&url=https%3A%2F%2Fcdn.mrctv.org%2Ffiles%2Fstyles%2Fmedium%2Fs3%2F2019-09%2Flooney%2520bags.png%3Fh%3D7d1f3709&_nc_hash=AQAL8Q6mRJoTX-E7", - } - ], - "statistics": { - "actual": { - "likeCount": 167, - "shareCount": 106, - "commentCount": 228, - "loveCount": 16, - "wowCount": 4, - "hahaCount": 504, - "sadCount": 1, - "angryCount": 8, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 17, - "shareCount": 22, - "commentCount": 61, - "loveCount": 4, - "wowCount": 5, - "hahaCount": 42, - "sadCount": 4, - "angryCount": 32, - "thankfulCount": 0, - }, - }, - "account": { - "id": 327932, - "name": "Media Research Center", - "handle": "mediaresearchcenter", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22814117_10155267787348717_9035099093135610710_n.png?_nc_cat=1&_nc_oc=AQnlBU3OCfeS-5QWg2v10Je2qqRgOr8VQS088-pc6gM4VZ_wGRCLBF_h5ObNobn7SOE&_nc_ht=scontent.xx&oh=4444f21775a6df49bc0c533f492d5953&oe=5E0B587B", - "subscriberCount": 1853272, - "url": "https://www.facebook.com/10498053716", - "platform": "Facebook", - "platformId": "10498053716", - "verified": True, - }, - }, - { - "id": 70158941544, - "platformId": "618786471475708_2644759942211674", - "platform": "Facebook", 
- "date": "2019-09-07 19:30:00", - "updated": "2019-09-08 00:30:47", - "type": "native_video", - "message": "This bear took a nap in a restaurant’s bathroom — and slept through the staff’s attempts to get it out 🐻💤😂", - "expandedLinks": [ - { - "original": "https://www.facebook.com/BuzzFeedNews/videos/233829937513838/", - "expanded": "https://www.facebook.com/BuzzFeedNews/videos/233829937513838/", - } - ], - "link": "https://www.facebook.com/BuzzFeedNews/videos/233829937513838/", - "postUrl": "https://www.facebook.com/BuzzFeedNews/posts/2644759942211674", - "subscriberCount": 3015477, - "score": 5.408888888888889, - "media": [ - { - "type": "video", - "url": "https://video.xx.fbcdn.net/v/t42.9040-2/70829649_1369510883210885_6464376971089412096_n.mp4?_nc_cat=108&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQkHMJ6YyNp5AfqE41JKIISreqFTOyyA9I_-UFm2Chcl-8t99lw6g8x6ckxMudx1dKg&_nc_ht=video.xx&oh=a383273ff1f2a898fbde4a5d47bef863&oe=5D75AC2F", - "height": 0, - "width": 0, - }, - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67264768_233830367513795_1857633506536980480_n.jpg?_nc_cat=109&_nc_log=1&_nc_oc=AQkL-NnpOehPSuLPO1pX0UitjWUMHRXEc8dlumSIUOlEmjWlnFFFM0DGVf5XOIusMrw&_nc_ht=scontent.xx&oh=13a862e4afac730aca26503d79ea752d&oe=5E09E16D", - "height": 720, - "width": 720, - "full": "https://scontent.xx.fbcdn.net/v/t15.5256-10/67264768_233830367513795_1857633506536980480_n.jpg?_nc_cat=109&_nc_log=1&_nc_oc=AQkL-NnpOehPSuLPO1pX0UitjWUMHRXEc8dlumSIUOlEmjWlnFFFM0DGVf5XOIusMrw&_nc_ht=scontent.xx&oh=7303e4e196b9d46aa13a0c6638d20567&oe=5DF4E136", - }, - ], - "statistics": { - "actual": { - "likeCount": 447, - "shareCount": 223, - "commentCount": 173, - "loveCount": 132, - "wowCount": 35, - "hahaCount": 204, - "sadCount": 3, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 104, - "shareCount": 32, - "commentCount": 19, - "loveCount": 38, - "wowCount": 5, - "hahaCount": 21, - "sadCount": 4, - "angryCount": 2, - "thankfulCount": 0, - }, - }, - "account": { - "id": 18756, - "name": "BuzzFeed News", - "handle": "BuzzFeedNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/37324661_1987747984579543_6544772647132069888_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQl4xuZMtXJ6qFqyRhwKzfdvsAYA1JGI1ajz4X8q4bIHiObnrMGyXWEFiDcVxaVlrgM&_nc_ht=scontent.xx&oh=3a3c2ae104e50e8860b8dcf413215500&oe=5DFB7022", - "subscriberCount": 3017031, - "url": "https://www.facebook.com/618786471475708", - "platform": "Facebook", - "platformId": "618786471475708", - "verified": True, - }, - "videoLengthMS": 55891, - }, - { - "id": 70176047384, - "platformId": "273864989376427_2990324384397127", - "platform": "Facebook", - "date": "2019-09-08 00:24:18", - "updated": "2019-09-08 00:44:08", - "type": "link", - "title": "Trump says he's calling off Afghanistan peace talks and secret meeting he had planned with Taliban", - "caption": "nbcnews.com", - "description": '"Unbeknownst to almost everyone, the major Taliban leaders ... were going to secretly meet with me at Camp David on Sunday," Trump tweeted.', - "message": "BREAKING: Days ahead of 9/11 anniversary, Pres. 
Trump announces that he was set to hold secret talks with the Taliban at Camp David in the US this weekend but he has called off the talks after a US service member was killed in a suicide attack in Kabul.", - "expandedLinks": [ - { - "original": "https://on.msnbc.com/34zdcTc", - "expanded": "https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_ms_fb_ma", - } - ], - "link": "https://on.msnbc.com/34zdcTc", - "postUrl": "https://www.facebook.com/msnbc/posts/2990324384397127", - "subscriberCount": 2290512, - "score": 5.383333333333334, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5", - } - ], - "statistics": { - "actual": { - "likeCount": 55, - "shareCount": 155, - "commentCount": 272, - "loveCount": 1, - "wowCount": 85, - "hahaCount": 228, - "sadCount": 9, - "angryCount": 164, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 40, - "shareCount": 21, - "commentCount": 47, - "loveCount": 3, - "wowCount": 7, - "hahaCount": 16, - "sadCount": 11, - "angryCount": 35, - "thankfulCount": 0, - }, - }, - "account": { - "id": 8324, - "name": "MSNBC", - "handle": "msnbc", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2", - "subscriberCount": 2290512, - "url": "https://www.facebook.com/273864989376427", - "platform": "Facebook", - "platformId": "273864989376427", - "verified": True, - }, - }, - { - "id": 70164500985, - "platformId": "1435071773455316_2255755588053593", - "platform": "Facebook", - "date": "2019-09-07 21:16:33", - "updated": "2019-09-08 00:27:30", - "type": "link", - "title": "Tony Perkins On Mass Shootings: ‘The Problem Is Not The Absence Of Laws; It’s An Absence Of Morality’", - "caption": "dailywire.com", - "description": "On September 1, former police officer and President of the Family Research Council (FRC) Tony Perkins appeared on Fox News to discuss the shooting in Odessa, Texas.", - "message": '"We’ve driven religion from our public life, and we\'re shocked that we no longer have morality and we no longer value human life."', - "expandedLinks": [ - { - "original": "http://dlvr.it/RCgJkW", - "expanded": "https://www.dailywire.com/news/51501/tony-perkins-mass-shootings-problem-not-absence-frank-camp?utm_source=facebook&utm_medium=social&utm_campaign=dwbrand", - } - ], - "link": "http://dlvr.it/RCgJkW", - "postUrl": "https://www.facebook.com/DailyWire/posts/2255755588053593", - "subscriberCount": 1934539, - "score": 5.293103448275862, - "media": [ - { - "type": "photo", - "url": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQB1Mu1MvMS8dD0w&w=720&h=720&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fd5f463cb-13ac-4cf1-869e-54c62197ef38.jpeg%3Fitok%3Dkjy8V7cH&cfs=1&_nc_hash=AQBtAC5c8_9jGNKY", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDnXhV0fMoQ4Rf2&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fd5f463cb-13ac-4cf1-869e-54c62197ef38.jpeg%3Fitok%3Dkjy8V7cH&_nc_hash=AQBdy8C2aVF71Aox", - } - ], - "statistics": { - "actual": { - "likeCount": 391, - "shareCount": 133, - "commentCount": 40, - "loveCount": 23, - "wowCount": 0, - "hahaCount": 0, - "sadCount": 27, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 33, - "shareCount": 13, - "commentCount": 24, - "loveCount": 3, - "wowCount": 6, - "hahaCount": 17, - "sadCount": 4, - "angryCount": 16, - "thankfulCount": 0, - }, - }, - "account": { - "id": 650861, - "name": "Daily Wire", - "handle": "DailyWire", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27655057_1815794295383060_2228253987427136016_n.png?_nc_cat=1&_nc_oc=AQm_uPD8ZwlgfmUIjiJBxewrWpNXIPkUpDdGdWdkYu9LXrRzIuUYx8pGdp5Kmcz1HU8&_nc_ht=scontent.xx&oh=ab8e2768dce63a6200349ce2d7dc8a11&oe=5DF6BB9F", - "subscriberCount": 1934601, - "url": "https://www.facebook.com/1435071773455316", - "platform": "Facebook", - "platformId": "1435071773455316", - "verified": True, - }, - }, - { - "id": 70161018459, - "platformId": "10716057521_10156407106532522", - "platform": "Facebook", - "date": "2019-09-07 20:00:00", - "updated": "2019-09-08 00:22:00", - "type": "link", - "title": "The House is investigating why a routine military trip to the Middle East stopped at Trump's resort in Scotland", - "caption": "theweek.com", - "description": "The House Oversight Committee has been investigating why the crew of a C-17 military transport plane making a routine trip from the United States to Kuwait to deliver supplies stayed at President Trump's Turnberry resort in Scotland, Politico reports. 
The House panel reportedly sent a letter to the....", - "message": '"The committee will be forced to consider alternative steps if the Pentagon does not begin complying voluntarily in the coming days.":', - "expandedLinks": [ - { - "original": "https://theweek.com/speedreads/863494/house-investigating-why-routine-military-trip-middle-east-stopped-trumps-resort-scotland?utm_source=links&utm_medium=website&utm_campaign=facebook", - "expanded": "https://theweek.com/speedreads/863494/house-investigating-why-routine-military-trip-middle-east-stopped-trumps-resort-scotland?utm_source=links&utm_medium=website&utm_campaign=facebook", - } - ], - "link": "https://theweek.com/speedreads/863494/house-investigating-why-routine-military-trip-middle-east-stopped-trumps-resort-scotland?utm_source=links&utm_medium=website&utm_campaign=facebook", - "postUrl": "https://www.facebook.com/theweek/posts/10156407106532522", - "subscriberCount": 419619, - "score": 5.277777777777778, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAL4RYh8-R5KEjD&w=720&h=720&url=https%3A%2F%2Fimages.theweek.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Ftw_image_6_4%2Fpublic%2Fgettyimages-998481948.jpg%3Fitok%3D2NNqgY23&cfs=1&_nc_hash=AQDlZL4Hgb09INsh", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAYZfhWWCLhvXQO&url=https%3A%2F%2Fimages.theweek.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Ftw_image_6_4%2Fpublic%2Fgettyimages-998481948.jpg%3Fitok%3D2NNqgY23&_nc_hash=AQDOJVAtl325JCyk", - } - ], - "statistics": { - "actual": { - "likeCount": 136, - "shareCount": 81, - "commentCount": 42, - "loveCount": 0, - "wowCount": 37, - "hahaCount": 8, - "sadCount": 4, - "angryCount": 72, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 18, - "shareCount": 9, - "commentCount": 13, - "loveCount": 6, - "wowCount": 5, - "hahaCount": 10, - "sadCount": 4, - "angryCount": 7, - "thankfulCount": 0, - }, - }, - "account": { - "id": 15634, - "name": "THE WEEK", - "handle": "theweek", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21462349_10154703764612522_6251347795371418727_n.jpg?_nc_cat=110&_nc_oc=AQlbKAutDhFEqh0Qx-B7AQHdixiuSL6cSzNkUlV8rK3NiEkp4i1D4Fr32CCce7z6lUA&_nc_ht=scontent.xx&oh=70ad5528b8f299f3aff2b696de103f35&oe=5E0AC355", - "subscriberCount": 419619, - "url": "https://www.facebook.com/10716057521", - "platform": "Facebook", - "platformId": "10716057521", - "verified": True, - }, - }, - { - "id": 70162137656, - "platformId": "519305544814653_2705955072816345", - "platform": "Facebook", - "date": "2019-09-07 20:30:00", - "updated": "2019-09-08 00:43:12", - "type": "photo", - "expandedLinks": [ - { - "original": "https://www.facebook.com/theconservativetribune/photos/a.520106441401230/2705954829483036/?type=3", - "expanded": "https://www.facebook.com/theconservativetribune/photos/a.520106441401230/2705954829483036/?type=3", - } - ], - "link": "https://www.facebook.com/theconservativetribune/photos/a.520106441401230/2705954829483036/?type=3", - "postUrl": "https://www.facebook.com/theconservativetribune/posts/2705955072816345", - "subscriberCount": 4272313, - "score": 5.141030392883618, - "media": [ - { - "type": "photo", - "url": "https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69969764_2705954832816369_3306078688440745984_n.jpg?_nc_cat=107&_nc_oc=AQmIHRqDlQ1NjP1w0QuHG0ik274DNEJIqy4_inibBtrShHzoLkx0TBFnDKT3s4KJbjI&_nc_ht=scontent.xx&oh=b8f70055db120a5004f777bef149643a&oe=5DC9826E", - "height": 720, - "width": 720, - "full": 
"https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69969764_2705954832816369_3306078688440745984_n.jpg?_nc_cat=107&_nc_oc=AQmIHRqDlQ1NjP1w0QuHG0ik274DNEJIqy4_inibBtrShHzoLkx0TBFnDKT3s4KJbjI&_nc_ht=scontent.xx&oh=b8f70055db120a5004f777bef149643a&oe=5DC9826E", - } - ], - "statistics": { - "actual": { - "likeCount": 16507, - "shareCount": 8305, - "commentCount": 803, - "loveCount": 1967, - "wowCount": 30, - "hahaCount": 102, - "sadCount": 7, - "angryCount": 20, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 3901, - "shareCount": 948, - "commentCount": 249, - "loveCount": 197, - "wowCount": 22, - "hahaCount": 34, - "sadCount": 21, - "angryCount": 24, - "thankfulCount": 0, - }, - }, - "account": { - "id": 48733, - "name": "Conservative Tribune by WJ", - "handle": "theconservativetribune", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46353000_2202571529821371_2816384259860725760_n.png?_nc_cat=1&_nc_oc=AQmyLmtQSJjNV6pRGGi1jlDx51XV7ULbxHYoibyNBKmronK_dpS07OVljvF5-BdX07s&_nc_ht=scontent.xx&oh=eeade969630115fc0c1ec64d4a462e0f&oe=5DF58CE0", - "subscriberCount": 4272095, - "url": "https://www.facebook.com/519305544814653", - "platform": "Facebook", - "platformId": "519305544814653", - "verified": True, - }, - }, - { - "id": 70169831725, - "platformId": "119984188013847_2736300513048855", - "platform": "Facebook", - "date": "2019-09-07 22:47:53", - "updated": "2019-09-08 00:45:59", - "type": "link", - "title": "Buttigieg: God will punish us for global warming - WND", - "caption": "wnd.com", - "description": "This argument should turn the staunchest conservative into a daily campaigner against global warming, or climate change as it's called since the warming stopped.", - "message": "'Mayor Pete' becomes 'Preacher Pete' – warns of God's coming judgment Democrat presidential candidate Pete Buttigieg has made his mark in the crowded field by touting his homosexuality and \"marriage\" to another man, his support of abortion up to the moment of birth and his attacks on Vice President Mike Pence and conservative Christians. In his latest move against the faithful, he has taken on the role of theologian and prophet, warning of God's coming judgment against this generation. For what sin, you ask. 
Well, Prophet Pete has just made up a new one ...", - "expandedLinks": [ - { - "original": "https://www.wnd.com/2019/09/buttigieg-god-will-punish-us-global-warming/", - "expanded": "https://www.wnd.com/2019/09/buttigieg-god-will-punish-us-global-warming/", - } - ], - "link": "https://www.wnd.com/2019/09/buttigieg-god-will-punish-us-global-warming/", - "postUrl": "https://www.facebook.com/WNDNews/posts/2736300513048855", - "subscriberCount": 847184, - "score": 5.123376623376624, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCDOi5uHIJ3tq_P&w=300&h=300&url=https%3A%2F%2Fwww.wnd.com%2Fwp-content%2Fuploads%2F2019%2F04%2Fmayor-pete44.jpg&cfs=1&sx=197&sy=0&sw=300&sh=300&_nc_hash=AQA6CnG84b2r7UwJ", - "height": 300, - "width": 300, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQAKcW4hQUbLE4Nb&url=https%3A%2F%2Fwww.wnd.com%2Fwp-content%2Fuploads%2F2019%2F04%2Fmayor-pete44.jpg&_nc_hash=AQC4O0UNvIXSCQ9E", - } - ], - "statistics": { - "actual": { - "likeCount": 59, - "shareCount": 174, - "commentCount": 701, - "loveCount": 0, - "wowCount": 25, - "hahaCount": 408, - "sadCount": 24, - "angryCount": 187, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 83, - "shareCount": 77, - "commentCount": 52, - "loveCount": 5, - "wowCount": 11, - "hahaCount": 12, - "sadCount": 7, - "angryCount": 61, - "thankfulCount": 0, - }, - }, - "account": { - "id": 816605, - "name": "WND", - "handle": "WNDNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/10616184_978685205477070_7301123703638589430_n.jpg?_nc_cat=110&_nc_oc=AQm5V5YpP7PucYw6lh5UcBTbvWDxAw3jNZpGGnOpem7RUhl7KQuT_0RFS9UItcAmqL8&_nc_ht=scontent.xx&oh=42799b825016837895356c7b53b45526&oe=5E0F6F64", - "subscriberCount": 847147, - "url": "https://www.facebook.com/119984188013847", - "platform": "Facebook", - "platformId": "119984188013847", - "verified": False, - }, - }, - { - "id": 70170627801, - "platformId": "273864989376427_2990195337743365", - "platform": "Facebook", - "date": "2019-09-07 23:04:12", - "updated": "2019-09-08 00:44:08", - "type": "link", - "title": "Pentagon takes money from military schools, more for border wall", - "caption": "msnbc.com", - "description": "The Donald Trump administration has stated plans to take more than $3.6 billion out of military projects including money that was supposed to pay for schools, and military bases, for a total of 127 projects put on hold, to divert monies to the U.S.-Mexico border wall. Joy Reid and her panel discuss.", - "message": '"Republicans are discovering that their utter loyalty and utter obedience to Donald Trump will do absolutely nothing to stop him from stealing money out of their states… to build the border wall." 
- Joy Reid', - "expandedLinks": [ - { - "original": "https://on.msnbc.com/2LCCYNq", - "expanded": "https://www.msnbc.com/am-joy/watch/trump-admin-diverts-3-6b-in-military-funding-to-border-wall-68438085927?cid=sm_npd_ms_fb_ma&fbclid=IwAR3zIMmwFSYrPw7tXLvH09swHvwio_08X1N3e6olJ2VFajIqQzAhlcquifM", - } - ], - "link": "https://on.msnbc.com/2LCCYNq", - "postUrl": "https://www.facebook.com/msnbc/posts/2990195337743365", - "subscriberCount": 2290452, - "score": 5.096345514950166, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBmWxdfX-Y0YKeK&w=720&h=720&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_joy_trumpwall_190907_1920x1080.nbcnews-fp-1200-630.jpg&cfs=1&_nc_hash=AQCq-YM-F5lNiiu7", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCRqhvaoLdT2olR&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_joy_trumpwall_190907_1920x1080.nbcnews-fp-1200-630.jpg&_nc_hash=AQCBPi5HM3ivKDck", - } - ], - "statistics": { - "actual": { - "likeCount": 205, - "shareCount": 195, - "commentCount": 299, - "loveCount": 7, - "wowCount": 22, - "hahaCount": 110, - "sadCount": 42, - "angryCount": 654, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 72, - "shareCount": 40, - "commentCount": 80, - "loveCount": 6, - "wowCount": 12, - "hahaCount": 27, - "sadCount": 16, - "angryCount": 48, - "thankfulCount": 0, - }, - }, - "account": { - "id": 8324, - "name": "MSNBC", - "handle": "msnbc", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2", - "subscriberCount": 2290512, - "url": "https://www.facebook.com/273864989376427", - "platform": "Facebook", - "platformId": "273864989376427", - "verified": True, - }, - }, - { - "id": 70172309273, - "platformId": "5281959998_10152010138044999", - "platform": "Facebook", - "date": "2019-09-07 23:30:12", - "updated": "2019-09-08 00:47:27", - "type": "link", - "title": "Trump Says He’s Called Off Negotiations With Taliban After Afghanistan Bombing", - "caption": "nytimes.com", - "description": "The president said he had canceled a secret meeting at Camp David with Taliban leaders and the president of Afghanistan.", - "message": "President Trump said on Saturday that he had canceled a secret meeting at Camp David with Taliban leaders and the president of Afghanistan and has called off negotiations with the Afghan insurgent group that were close to a peace agreement months in the making.", - "expandedLinks": [ - { - "original": "https://www.nytimes.com/2019/09/07/us/politics/trump-taliban-afghanistan.html", - "expanded": "https://www.nytimes.com/2019/09/07/us/politics/trump-taliban-afghanistan.html", - } - ], - "link": "https://www.nytimes.com/2019/09/07/us/politics/trump-taliban-afghanistan.html", - "postUrl": "https://www.facebook.com/nytimes/posts/10152010138044999", - "subscriberCount": 16854203, - "score": 5.086956521739131, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQDGhnO9QS8cKTun&w=720&h=720&url=https%3A%2F%2Fstatic01.nyt.com%2Fnewsgraphics%2Fimages%2Ficons%2FdefaultPromoCrop.png&cfs=1&_nc_hash=AQBIS03W5Q8pqINR", - "height": 720, - "width": 720, - "full": 
"https://external.xx.fbcdn.net/safe_image.php?d=AQBNcmcrufntg-Bu&url=https%3A%2F%2Fstatic01.nyt.com%2Fnewsgraphics%2Fimages%2Ficons%2FdefaultPromoCrop.png&_nc_hash=AQCcLkO5R7qvqmT3", - } - ], - "statistics": { - "actual": { - "likeCount": 183, - "shareCount": 214, - "commentCount": 579, - "loveCount": 1, - "wowCount": 147, - "hahaCount": 528, - "sadCount": 18, - "angryCount": 85, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 142, - "shareCount": 52, - "commentCount": 68, - "loveCount": 10, - "wowCount": 16, - "hahaCount": 22, - "sadCount": 16, - "angryCount": 19, - "thankfulCount": 0, - }, - }, - "account": { - "id": 7132, - "name": "The New York Times", - "handle": "nytimes", - "profileImage": "https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57", - "subscriberCount": 16854715, - "url": "https://www.facebook.com/5281959998", - "platform": "Facebook", - "platformId": "5281959998", - "verified": True, - }, - }, - { - "id": 70165217975, - "platformId": "1435071773455316_2255764244719394", - "platform": "Facebook", - "date": "2019-09-07 21:32:02", - "updated": "2019-09-08 00:27:30", - "type": "link", - "title": "Democrats To Take Formal Steps On Impeachment Next Week", - "caption": "dailywire.com", - "description": "The Democrat-controlled House Judiciary Committee is set to vote next week on a resolution laying out the procedures that it will use for its investigation as they are reportedly considering moving to impeach President Donald Trump.", - "message": "\"It is expected to follow the precedent set in 1974 over the committee's procedures during then-President Richard Nixon's impeachment proceedings.\"", - "expandedLinks": [ - { - "original": "http://dlvr.it/RCgKr7", - "expanded": "https://www.dailywire.com/news/51495/breaking-democrats-take-formal-steps-impeachment-ryan-saavedra?utm_campaign=dwbrand", - } - ], - "link": "http://dlvr.it/RCgKr7", - "postUrl": "https://www.facebook.com/DailyWire/posts/2255764244719394", - "subscriberCount": 1934539, - "score": 5.064814814814815, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQAL34Moo0djcN3q&w=630&h=630&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fjerry_nadler.jpg%3Fitok%3D0q7hDEmL&cfs=1&sx=179&sy=0&sw=630&sh=630&_nc_hash=AQDqkzXDIf7GjDhh", - "height": 630, - "width": 630, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQD5qWTKc9agh4Wt&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fjerry_nadler.jpg%3Fitok%3D0q7hDEmL&_nc_hash=AQA8SoamYZ0MfJAJ", - } - ], - "statistics": { - "actual": { - "likeCount": 31, - "shareCount": 19, - "commentCount": 220, - "loveCount": 3, - "wowCount": 4, - "hahaCount": 153, - "sadCount": 5, - "angryCount": 112, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 30, - "shareCount": 13, - "commentCount": 21, - "loveCount": 3, - "wowCount": 6, - "hahaCount": 16, - "sadCount": 4, - "angryCount": 15, - "thankfulCount": 0, - }, - }, - "account": { - "id": 650861, - "name": "Daily Wire", - "handle": "DailyWire", - "profileImage": 
"https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27655057_1815794295383060_2228253987427136016_n.png?_nc_cat=1&_nc_oc=AQm_uPD8ZwlgfmUIjiJBxewrWpNXIPkUpDdGdWdkYu9LXrRzIuUYx8pGdp5Kmcz1HU8&_nc_ht=scontent.xx&oh=ab8e2768dce63a6200349ce2d7dc8a11&oe=5DF6BB9F", - "subscriberCount": 1934601, - "url": "https://www.facebook.com/1435071773455316", - "platform": "Facebook", - "platformId": "1435071773455316", - "verified": True, - }, - }, - { - "id": 70161881054, - "platformId": "147772245840_10156546913410841", - "platform": "Facebook", - "date": "2019-09-07 20:25:03", - "updated": "2019-09-08 00:33:20", - "type": "link", - "title": "Kirstie Alley Calls Out Hollywood Hypocrites for Refusing to Work With Republicans", - "caption": "chicksonright.com", - "description": "I remember Kirstie Alley best for It Takes Two (with Mary Kate and Ashley Olsen, age 9) and the TV movie Toothless, both of which came out in the mid-1990s. I couldn't tell you what she's been", - "message": "🙌🙌🙌", - "expandedLinks": [ - { - "original": "https://www.chicksonright.com/opinion/2019/09/07/kirstie-alley-calls-out-hollywood-hypocrites-for-refusing-to-work-with-republicans/", - "expanded": "https://www.chicksonright.com/opinion/2019/09/07/kirstie-alley-calls-out-hollywood-hypocrites-for-refusing-to-work-with-republicans/", - } - ], - "link": "https://www.chicksonright.com/opinion/2019/09/07/kirstie-alley-calls-out-hollywood-hypocrites-for-refusing-to-work-with-republicans/", - "postUrl": "https://www.facebook.com/TheYoungCons/posts/10156546913410841", - "subscriberCount": 999933, - "score": 4.986301369863014, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQA-a89n7sUHEZ_k&w=720&h=720&url=https%3A%2F%2Fwww.chicksonright.com%2Fopinion%2Fwp-content%2Fuploads%2Fsites%2F6%2F2019%2F09%2FScreen-Shot-2019-09-07-at-11.57.14-AM.png&cfs=1&_nc_hash=AQDoIHiSAC6cMMwx", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDuGI7txbVqNakz&url=https%3A%2F%2Fwww.chicksonright.com%2Fopinion%2Fwp-content%2Fuploads%2Fsites%2F6%2F2019%2F09%2FScreen-Shot-2019-09-07-at-11.57.14-AM.png&_nc_hash=AQCiFQlBSx-BnMpT", - } - ], - "statistics": { - "actual": { - "likeCount": 535, - "shareCount": 62, - "commentCount": 63, - "loveCount": 58, - "wowCount": 7, - "hahaCount": 2, - "sadCount": 0, - "angryCount": 1, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 27, - "shareCount": 19, - "commentCount": 38, - "loveCount": 3, - "wowCount": 5, - "hahaCount": 14, - "sadCount": 4, - "angryCount": 36, - "thankfulCount": 0, - }, - }, - "account": { - "id": 48734, - "name": "Young Conservatives", - "handle": "TheYoungCons", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/45427184_10155855954205841_8373169778709233664_n.jpg?_nc_cat=1&_nc_oc=AQmAgxZhqj9CXmiY228VRUATEHxELlR7p8BpNguYxOU_n6uxWw17ssXZSIF2mv2DreA&_nc_ht=scontent.xx&oh=ea10aeb60d4d31efb95e2c0a9f7ee098&oe=5DFE69A3", - "subscriberCount": 1000057, - "url": "https://www.facebook.com/147772245840", - "platform": "Facebook", - "platformId": "147772245840", - "verified": False, - }, - }, - { - "id": 70168355148, - "platformId": "10643211755_10158355674951756", - "platform": "Facebook", - "date": "2019-09-07 22:32:03", - "updated": "2019-09-08 00:23:24", - "type": "link", - "title": "Opinion: Earth Has Survived Extinctions Before, It's Humans Who Are Fragile", - "caption": "npr.org", - "description": "Earth has experienced cataclysmic life-destroying events before. 
NPR's Scott Simon reflects on what this means for humans in the midst of climate change.", - "message": "Opinion: Earth has endured cataclysmic life-destroying events before. NPR's Scott Simon says it's us –– all the living things that inhabit it for a while –– who are fragile.", - "expandedLinks": [ - { - "original": "https://trib.al/gZNYTAo", - "expanded": "https://www.npr.org/2019/09/07/758448991/opinion-earth-has-survived-extinctions-before-its-humans-who-are-fragile?utm_source=facebook.com&utm_medium=social&utm_term=nprnews&utm_campaign=npr", - } - ], - "link": "https://trib.al/gZNYTAo", - "postUrl": "https://www.facebook.com/NPR/posts/10158355674951756", - "subscriberCount": 6596234, - "score": 4.9751724137931035, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBmP5_vhoAZ-PzY&w=720&h=720&url=https%3A%2F%2Fmedia.npr.org%2Fassets%2Fimg%2F2019%2F09%2F06%2Fgettyimages-1163083905_wide-aeecc15a41bef8f3ab6960ecdd682dd88366ce2b.jpg%3Fs%3D1400&cfs=1&_nc_hash=AQCSAlFPZxwX1a8K", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBfeWK671yKDWXM&url=https%3A%2F%2Fmedia.npr.org%2Fassets%2Fimg%2F2019%2F09%2F06%2Fgettyimages-1163083905_wide-aeecc15a41bef8f3ab6960ecdd682dd88366ce2b.jpg%3Fs%3D1400&_nc_hash=AQAwqEqQcFc87LZi", - } - ], - "statistics": { - "actual": { - "likeCount": 2322, - "shareCount": 727, - "commentCount": 297, - "loveCount": 108, - "wowCount": 31, - "hahaCount": 46, - "sadCount": 72, - "angryCount": 4, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 260, - "shareCount": 161, - "commentCount": 100, - "loveCount": 15, - "wowCount": 48, - "hahaCount": 14, - "sadCount": 79, - "angryCount": 48, - "thankfulCount": 0, - }, - }, - "account": { - "id": 6149, - "name": "NPR", - "handle": "NPR", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/392453_10150756268711756_1078337478_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkCimbrOrcgFhsAxAA1U5koNLGX9OLyOXdvEKxfRI0_6KYiFljw87Kls85nrj6clWA&_nc_ht=scontent.xx&oh=1883b0436c2dd854062b47c02250e87b&oe=5DF7D154", - "subscriberCount": 6596236, - "url": "https://www.facebook.com/10643211755", - "platform": "Facebook", - "platformId": "10643211755", - "verified": True, - }, - }, - { - "id": 70169812970, - "platformId": "9258148868_10156759046683869", - "platform": "Facebook", - "date": "2019-09-07 22:45:06", - "updated": "2019-09-08 00:45:29", - "type": "link", - "title": "William Steig’s Books Explored the Reality That Adults Don’t Want Children to Know About", - "caption": "newyorker.com", - "description": "Steig has a gift for stories that feel like fables or folktales, didactic forms that require a kind of frankness.", - "message": "Only a select handful of geniuses can manage to amuse both the kid being read to and the adult doing the reading. 
William Steig is one.", - "expandedLinks": [ - { - "original": "http://nyer.cm/exs5OOG", - "expanded": "https://www.newyorker.com/books/page-turner/william-steigs-books-explored-the-reality-that-adults-dont-want-children-to-know-about?utm_social-type=owned&utm_medium=social&utm_source=facebook&mbid=social_facebook&utm_brand=tny", - } - ], - "link": "http://nyer.cm/exs5OOG", - "postUrl": "https://www.facebook.com/newyorker/posts/10156759046683869", - "subscriberCount": 4287168, - "score": 4.913978494623656, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQC-HRIqGdFahnqE&w=720&h=720&url=https%3A%2F%2Fmedia.newyorker.com%2Fphotos%2F5cf818f247f7cc3a165070ad%2F16%3A9%2Fw_1200%2Ch_630%2Cc_limit%2FKidLit-SteigAmosandBoris-Social.jpg&cfs=1&_nc_hash=AQB-4qDEgnPqpwyJ", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQC6TrF4FdlN1wga&url=https%3A%2F%2Fmedia.newyorker.com%2Fphotos%2F5cf818f247f7cc3a165070ad%2F16%3A9%2Fw_1200%2Ch_630%2Cc_limit%2FKidLit-SteigAmosandBoris-Social.jpg&_nc_hash=AQDfMTbJzUyDjhsx", - } - ], - "statistics": { - "actual": { - "likeCount": 264, - "shareCount": 98, - "commentCount": 19, - "loveCount": 71, - "wowCount": 2, - "hahaCount": 3, - "sadCount": 0, - "angryCount": 0, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 33, - "shareCount": 13, - "commentCount": 9, - "loveCount": 6, - "wowCount": 4, - "hahaCount": 7, - "sadCount": 8, - "angryCount": 13, - "thankfulCount": 0, - }, - }, - "account": { - "id": 10284, - "name": "The New Yorker", - "handle": "newyorker", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1174822_10151878824588869_2070117374_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQno9Opk1N_2uuxM9xMCbaLh-8w7vk3rWYzY5iX2B0axGmTGyU1kkZY1RTndOiqUuAE&_nc_ht=scontent.xx&oh=e4a5a2194344ddb52a1e83254332bea3&oe=5DC7CED7", - "subscriberCount": 4287325, - "url": "https://www.facebook.com/9258148868", - "platform": "Facebook", - "platformId": "9258148868", - "verified": True, - }, - }, - { - "id": 70171946856, - "platformId": "182919686769_10156515246296770", - "platform": "Facebook", - "date": "2019-09-07 23:30:09", - "updated": "2019-09-08 00:40:44", - "type": "link", - "title": "Judge Rules In Favor Of CAIR, Says Terrorism Watchlist Violates Constitutional Rights", - "caption": "dailycaller.com", - "description": "'Muslim registry'", - "message": "Wow.", - "expandedLinks": [ - { - "original": "https://dailycaller.com/2019/09/05/cair-watchlist-lawsuit-court-islam/", - "expanded": "https://dailycaller.com/2019/09/05/cair-watchlist-lawsuit-court-islam/", - } - ], - "link": "https://dailycaller.com/2019/09/05/cair-watchlist-lawsuit-court-islam/", - "postUrl": "https://www.facebook.com/DailyCaller/posts/10156515246296770", - "subscriberCount": 5408428, - "score": 4.911174785100287, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQCzsDerqgeCLSlN&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f149ca9a410b2d40e794%2Fdbf3f811d3755e394f3369a84be4edc1ad58ca87_07dc201ff65866ba4d8a98a258bd2c75d6fadb3c_facebook&cfs=1&_nc_hash=AQC2WjhtmJYH0nh0", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQBOEhLoSnrmguB7&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f149ca9a410b2d40e794%2Fdbf3f811d3755e394f3369a84be4edc1ad58ca87_07dc201ff65866ba4d8a98a258bd2c75d6fadb3c_facebook&_nc_hash=AQC4PH3l5geEUt5p", - } - ], - "statistics": { - "actual": { - "likeCount": 42, - 
"shareCount": 303, - "commentCount": 486, - "loveCount": 0, - "wowCount": 60, - "hahaCount": 16, - "sadCount": 25, - "angryCount": 782, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 96, - "shareCount": 47, - "commentCount": 96, - "loveCount": 8, - "wowCount": 13, - "hahaCount": 47, - "sadCount": 6, - "angryCount": 36, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13489, - "name": "The Daily Caller", - "handle": "DailyCaller", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471", - "subscriberCount": 5408115, - "url": "https://www.facebook.com/182919686769", - "platform": "Facebook", - "platformId": "182919686769", - "verified": True, - }, - }, - { - "id": 70160023793, - "platformId": "86680728811_10158783301598812", - "platform": "Facebook", - "date": "2019-09-07 19:47:06", - "updated": "2019-09-08 00:45:25", - "type": "link", - "title": "To help Trump, GOP considering canceling at least 3 primaries and caucuses", - "caption": "abcnews.go.com", - "description": " ", - "message": "At least three states are considering plans to cancel their 2020 GOP presidential primaries and caucuses, prompting both of President Donald J. Trump's long-shot primary opponents to rail against the stunning, but not unprecedented move that would virtually cripple their chances.", - "expandedLinks": [ - { - "original": "https://abcn.ws/2Luw50G", - "expanded": "https://abcnews.go.com/Politics/trump-gop-canceling-gop-primaries-caucuses/story?id=65436462&cid=social_fb_abcn", - } - ], - "link": "https://abcn.ws/2Luw50G", - "postUrl": "https://www.facebook.com/ABCNews/posts/10158783301598812", - "subscriberCount": 14195962, - "score": 4.905707196029777, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?d=AQBPkzmGXGnYi4PV&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FPolitics%2Ftrump-gop-01-as-190906_hpMain_16x9_992.jpg&cfs=1&sx=434&sy=0&sw=558&sh=558&_nc_hash=AQCKoUX8TXoRMeqA", - "height": 558, - "width": 558, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQDGWHKJw8aw1GU2&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FPolitics%2Ftrump-gop-01-as-190906_hpMain_16x9_992.jpg&_nc_hash=AQBfcfAqZ3cLCQDZ", - } - ], - "statistics": { - "actual": { - "likeCount": 229, - "shareCount": 281, - "commentCount": 447, - "loveCount": 26, - "wowCount": 81, - "hahaCount": 141, - "sadCount": 36, - "angryCount": 736, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 171, - "shareCount": 81, - "commentCount": 65, - "loveCount": 20, - "wowCount": 21, - "hahaCount": 17, - "sadCount": 17, - "angryCount": 11, - "thankfulCount": 0, - }, - }, - "account": { - "id": 13878, - "name": "ABC News", - "handle": "ABCNews", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02", - "subscriberCount": 14196629, - "url": "https://www.facebook.com/86680728811", - "platform": "Facebook", - "platformId": "86680728811", - "verified": True, - }, - }, - { - "id": 70159920653, - "platformId": "83865976093_10156707520461094", - "platform": "Facebook", - "date": "2019-09-07 19:50:00", - "updated": "2019-09-08 00:42:30", - "type": "link", - 
"title": 'US Beekeepers File Suit Against EPA Charging "Illegal" Approval of Insecticide', - "caption": "truthout.org", - "description": "Scientists have warned that sulfoxaflor is part of the massive pollinator die-off across the U.S.", - "message": "The lawsuit charges that the EPA's approval of sulfoxaflor was illegally rendered as it put industry interests ahead of the health of pollinators and ignored the available science.", - "expandedLinks": [ - { - "original": "https://truthout.org/articles/us-beekeepers-file-suit-against-epa-charging-illegalapproval-of-insecticide/", - "expanded": "https://truthout.org/articles/us-beekeepers-file-suit-against-epa-charging-illegalapproval-of-insecticide/", - } - ], - "link": "https://truthout.org/articles/us-beekeepers-file-suit-against-epa-charging-illegalapproval-of-insecticide/", - "postUrl": "https://www.facebook.com/truthout/posts/10156707520461094", - "subscriberCount": 754215, - "score": 4.839160839160839, - "media": [ - { - "type": "photo", - "url": "https://external.xx.fbcdn.net/safe_image.php?w=720&h=720&url=https%3A%2F%2Ftruthout.org%2Fwp-content%2Fuploads%2F2019%2F09%2F19476616_0bfa08ace9_o.jpg&cfs=1&_nc_hash=AQBpmlkNTDdVJqxR", - "height": 720, - "width": 720, - "full": "https://external.xx.fbcdn.net/safe_image.php?d=AQCzPuIuejiQOYBx&url=https%3A%2F%2Ftruthout.org%2Fwp-content%2Fuploads%2F2019%2F09%2F19476616_0bfa08ace9_o.jpg&_nc_hash=AQCwh3_YUN6BH0z-", - } - ], - "statistics": { - "actual": { - "likeCount": 389, - "shareCount": 212, - "commentCount": 10, - "loveCount": 62, - "wowCount": 9, - "hahaCount": 0, - "sadCount": 5, - "angryCount": 5, - "thankfulCount": 0, - }, - "expected": { - "likeCount": 30, - "shareCount": 42, - "commentCount": 11, - "loveCount": 5, - "wowCount": 6, - "hahaCount": 3, - "sadCount": 11, - "angryCount": 35, - "thankfulCount": 0, - }, - }, - "account": { - "id": 4007, - "name": "Truthout", - "handle": "truthout", - "profileImage": "https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/19894613_10154795655481094_2383393652303893841_n.jpg?_nc_cat=110&_nc_oc=AQkTbiRpAD3hZBOdyzgT1PgAhh4VQwvgi7_UrWwWRSAE_kE9X6Vo3lxn9jEYjOQ71yY&_nc_ht=scontent.xx&oh=a8046506973cb0fdb4deab6119ed03f5&oe=5DF988DD", - "subscriberCount": 754194, - "url": "https://www.facebook.com/83865976093", - "platform": "Facebook", - "platformId": "83865976093", - "verified": True, - }, - }, - ], - "pagination": {}, - }, + 'status': 200, + 'result': { + 'posts': [{ + 'id': 70157825663, + 'platformId': '6491828674_10157706657658675', + 'platform': 'Facebook', + 'date': '2019-09-07 19:04:03', + 'updated': '2019-09-08 00:41:10', + 'type': 'link', + 'title': "NOAA assailed for defending Trump's Hurricane Dorian claim", + 'caption': 'pbs.org', + 'description': "WASHINGTON — Former top officials of the National Oceanic and Atmospheric Administration are assailing the agency for undermining its weather forecasters as it defends President Donald Trump's statement from days ago that Hurricane Dorian threatened Alabama. 
They say NOAA's action risks the credib...", + 'message': 'Former top officials of the National Oceanic and Atmospheric Administration are assailing the agency for undermining its weather forecasters as it defends President Trump’s statement from days ago that Hurricane Dorian threatened Alabama.', + 'expandedLinks': [{ + 'original': 'https://www.pbs.org/newshour/politics/noaa-assailed-for-defending-trumps-hurricane-dorian-claim', + 'expanded': 'https://www.pbs.org/newshour/politics/noaa-assailed-for-defending-trumps-hurricane-dorian-claim' + }], + 'link': 'https://www.pbs.org/newshour/politics/noaa-assailed-for-defending-trumps-hurricane-dorian-claim', + 'postUrl': 'https://www.facebook.com/newshour/posts/10157706657658675', + 'subscriberCount': 1417173, + 'score': 36.10762331838565, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAQChtFBtJknmoi&w=720&h=720&url=https%3A%2F%2Fd3i6fh83elv35t.cloudfront.net%2Fstatic%2F2019%2F09%2FRTS2OSQS-1024x683.jpg&cfs=1&_nc_hash=AQCCwh6oo6LQnKD3', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAsEhaTWfH-MVd9&url=https%3A%2F%2Fd3i6fh83elv35t.cloudfront.net%2Fstatic%2F2019%2F09%2FRTS2OSQS-1024x683.jpg&_nc_hash=AQA7Rw511Jqi6fCQ' + }], + 'statistics': { + 'actual': { + 'likeCount': 3056, + 'shareCount': 1745, + 'commentCount': 1021, + 'loveCount': 63, + 'wowCount': 242, + 'hahaCount': 419, + 'sadCount': 204, + 'angryCount': 1302, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 71, + 'shareCount': 44, + 'commentCount': 36, + 'loveCount': 5, + 'wowCount': 11, + 'hahaCount': 10, + 'sadCount': 26, + 'angryCount': 20, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 7777, + 'name': 'PBS NewsHour', + 'handle': 'newshour', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c2.0.200.200a/p200x200/303161_10150312469923675_881915800_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlncoeS4CvKUmO2uTUydTKWAioHD0iWx6bl9DqkBkwnCZgpb6CCkyZj7aidr38Ug1k&_nc_ht=scontent.xx&oh=0d6d1417f6b982eac877d479f2404a37&oe=5E0E2C5A', + 'subscriberCount': 1417219, + 'url': 'https://www.facebook.com/6491828674', + 'platform': 'Facebook', + 'platformId': '6491828674', + 'verified': True + } + }, { + 'id': 70175022660, + 'platformId': '155869377766434_3572995539387117', + 'platform': 'Facebook', + 'date': '2019-09-07 23:57:09', + 'updated': '2019-09-08 00:20:11', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://nbcnews.to/34stfC2', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio' + }, { + 'original': 'https://nbcnews.to/34stfC2', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_nn_fb_ma&fbclid=IwAR0CBM_4FHMh8nmjiAlK-SwCMI5z15Uppifb0j2UFphPdoYI_7aib4nNkio' + }], + 'link': 'https://nbcnews.to/34stfC2', + 'postUrl': 'https://www.facebook.com/NBCNews/posts/3572995539387117', + 'subscriberCount': 9970622, + 'score': 35.17213114754098, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 321, + 'shareCount': 732, + 'commentCount': 1276, + 'loveCount': 10, + 'wowCount': 212, + 'hahaCount': 1133, + 'sadCount': 43, + 'angryCount': 564, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 27, + 'shareCount': 20, + 'commentCount': 25, + 'loveCount': 6, + 'wowCount': 9, + 'hahaCount': 12, + 'sadCount': 12, + 'angryCount': 11, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13889, + 'name': 'NBC News', + 'handle': 'NBCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653', + 'subscriberCount': 9970540, + 'url': 'https://www.facebook.com/155869377766434', + 'platform': 'Facebook', + 'platformId': '155869377766434', + 'verified': True + } + }, { + 'id': 70166530441, + 'platformId': '5281959998_10152010079459999', + 'platform': 'Facebook', + 'date': '2019-09-07 22:01:31', + 'updated': '2019-09-08 00:47:27', + 'type': 'link', + 'title': '2019 U.S. Open Live Updates: Bianca Andreescu Defeats Serena Williams', + 'caption': 'nytimes.com', + 'description': 'Andreescu, a 19-year-old Canadian, won her first Grand Slam title, denying Williams her 24th.', + 'message': 'Breaking News: Bianca Andreescu, 19, has defeated Serena Williams to win the U.S. 
Open, capping a stunning rise to the top of women’s tennis.', + 'expandedLinks': [{ + 'original': 'https://www.nytimes.com/2019/09/07/sports/tennis/us-open-serena-williams-bianca-andreescu.html?smid=fb-nytimes&smtyp=cur', + 'expanded': 'https://www.nytimes.com/2019/09/07/sports/tennis/us-open-serena-williams-bianca-andreescu.html?smid=fb-nytimes&smtyp=cur' + }], + 'link': 'https://www.nytimes.com/2019/09/07/sports/tennis/us-open-serena-williams-bianca-andreescu.html?smid=fb-nytimes&smtyp=cur', + 'postUrl': 'https://www.facebook.com/nytimes/posts/10152010079459999', + 'subscriberCount': 16854203, + 'score': 35.01030927835052, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCq231_1hMcQsgV&w=550&h=550&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fsports%2F07open-women-live-serena2%2F07open-women-live-serena2-facebookJumbo.jpg&cfs=1&sx=340&sy=0&sw=550&sh=550&_nc_hash=AQBLh_V9dCVShHEK', + 'height': 550, + 'width': 550, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBU8mPAEayRbkd7&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fsports%2F07open-women-live-serena2%2F07open-women-live-serena2-facebookJumbo.jpg&_nc_hash=AQDiFMN6i1MXQLjS' + }], + 'statistics': { + 'actual': { + 'likeCount': 6493, + 'shareCount': 2008, + 'commentCount': 702, + 'loveCount': 1335, + 'wowCount': 2493, + 'hahaCount': 51, + 'sadCount': 479, + 'angryCount': 23, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 161, + 'shareCount': 61, + 'commentCount': 75, + 'loveCount': 11, + 'wowCount': 19, + 'hahaCount': 23, + 'sadCount': 17, + 'angryCount': 21, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 7132, + 'name': 'The New York Times', + 'handle': 'nytimes', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57', + 'subscriberCount': 16854715, + 'url': 'https://www.facebook.com/5281959998', + 'platform': 'Facebook', + 'platformId': '5281959998', + 'verified': True + } + }, { + 'id': 70161391741, + 'platformId': '13312631635_10157392232686636', + 'platform': 'Facebook', + 'date': '2019-09-07 20:18:42', + 'updated': '2019-09-08 00:42:43', + 'type': 'link', + 'title': "Amber Rudd resigns as Boris Johnson's government plunged into further chaos", + 'caption': 'independent.co.uk', + 'description': 'Amber Rudd has resigned and plans to run as an independent candidate in a future general election. The cabinet minister told The Sunday Times she was resigning because of Boris Johnson\'s "purge" of the party. 
More follows…', + 'message': 'BREAKING', + 'expandedLinks': [{ + 'original': 'https://www.independent.co.uk/news/uk/politics/amber-rudd-resign-boris-johnson-cabinet-conservatives-brexit-a9096146.html', + 'expanded': 'https://www.independent.co.uk/news/uk/politics/amber-rudd-resign-boris-johnson-cabinet-conservatives-brexit-a9096146.html' + }], + 'link': 'https://www.independent.co.uk/news/uk/politics/amber-rudd-resign-boris-johnson-cabinet-conservatives-brexit-a9096146.html', + 'postUrl': 'https://www.facebook.com/TheIndependentOnline/posts/10157392232686636', + 'subscriberCount': 8832865, + 'score': 33.56692913385827, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBCGewJje1f67Rg&w=720&h=720&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2018%2F09%2F26%2F17%2Fbreaking-4.png&cfs=1&_nc_hash=AQBfdoe_AAyA9BFz', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDVU76tPdyR4Lts&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2018%2F09%2F26%2F17%2Fbreaking-4.png&_nc_hash=AQAf9SJj09GBLcIW' + }], + 'statistics': { + 'actual': { + 'likeCount': 3194, + 'shareCount': 2442, + 'commentCount': 682, + 'loveCount': 365, + 'wowCount': 283, + 'hahaCount': 1538, + 'sadCount': 10, + 'angryCount': 12, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 87, + 'shareCount': 43, + 'commentCount': 55, + 'loveCount': 9, + 'wowCount': 9, + 'hahaCount': 27, + 'sadCount': 8, + 'angryCount': 16, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 19065, + 'name': 'The Independent', + 'handle': 'TheIndependentOnline', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11051795_10152732082756636_6705742038347351188_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQmApCC_log9_TfPU5-TLVRKHyBo2YH6UPG2d6R-43r5u7HhElr7QPKk9J_AXR9q1Ac&_nc_ht=scontent.xx&oh=47ac79067cb2e33520f6920eb409611d&oe=5E0FED75', + 'subscriberCount': 8834731, + 'url': 'https://www.facebook.com/13312631635', + 'platform': 'Facebook', + 'platformId': '13312631635', + 'verified': True + } + }, { + 'id': 70157398709, + 'platformId': '6250307292_10159057208257293', + 'platform': 'Facebook', + 'date': '2019-09-07 19:00:01', + 'updated': '2019-09-08 00:18:11', + 'type': 'link', + 'title': 'NOAA’s support of President Trump over its own scientists provokes mass uproar in weather community', + 'caption': 'washingtonpost.com', + 'description': "Weather forecasters inside and outside the government and former leaders of NOAA and the Weather Service have spoken against NOAA's decision.", + 'message': "Weather forecasters inside and outside the government and former leaders of NOAA and the Weather Service have spoken against NOAA's decision.", + 'expandedLinks': [{ + 'original': 'https://www.washingtonpost.com/weather/2019/09/07/noaas-support-president-trump-over-its-own-scientists-provokes-mass-uproar-weather-community/?tid=sm_fb', + 'expanded': 'https://www.washingtonpost.com/weather/2019/09/07/noaas-support-president-trump-over-its-own-scientists-provokes-mass-uproar-weather-community/?tid=sm_fb' + }], + 'link': 'https://www.washingtonpost.com/weather/2019/09/07/noaas-support-president-trump-over-its-own-scientists-provokes-mass-uproar-weather-community/?tid=sm_fb', + 'postUrl': 'https://www.facebook.com/washingtonpost/posts/10159057208257293', + 'subscriberCount': 6289171, + 'score': 28.168560606060606, + 'media': [{ + 'type': 'photo', + 'url': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQAwZem4zr3cX-NP&w=720&h=720&url=https%3A%2F%2Fwww.washingtonpost.com%2Fresizer%2FkQCso98jOGkmOTc9hfSlGydUsOM%3D%2F1484x0%2Farc-anglerfish-washpost-prod-washpost.s3.amazonaws.com%2Fpublic%2FD2RJSBBJSJHILN2XJ3O5GQIKBA.png&cfs=1&_nc_hash=AQA2NtYOouKVPrCp', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQB4ljiz_z3LoSEg&url=https%3A%2F%2Fwww.washingtonpost.com%2Fresizer%2FkQCso98jOGkmOTc9hfSlGydUsOM%3D%2F1484x0%2Farc-anglerfish-washpost-prod-washpost.s3.amazonaws.com%2Fpublic%2FD2RJSBBJSJHILN2XJ3O5GQIKBA.png&_nc_hash=AQDVp-1s7L_A_3Ld' + }], + 'statistics': { + 'actual': { + 'likeCount': 5375, + 'shareCount': 3264, + 'commentCount': 1291, + 'loveCount': 84, + 'wowCount': 902, + 'hahaCount': 261, + 'sadCount': 331, + 'angryCount': 3365, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 183, + 'shareCount': 94, + 'commentCount': 86, + 'loveCount': 11, + 'wowCount': 27, + 'hahaCount': 37, + 'sadCount': 43, + 'angryCount': 47, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 10337, + 'name': 'Washington Post', + 'handle': 'washingtonpost', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21430382_10156479428327293_4985425836902947855_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlVAdyvl5eHjwkppWx8pvifrl3XbqjhakYzwfQ1AHjPFaQPjFxNF4BbZq5BQ1nys4Y&_nc_ht=scontent.xx&oh=6cea07f8fc3edae1f7c743fc8997901c&oe=5DC8AB0A', + 'subscriberCount': 6289503, + 'url': 'https://www.facebook.com/6250307292', + 'platform': 'Facebook', + 'platformId': '6250307292', + 'verified': True + } + }, { + 'id': 70157976062, + 'platformId': '354522044588660_3473929489314551', + 'platform': 'Facebook', + 'date': '2019-09-07 19:00:24', + 'updated': '2019-09-08 00:39:40', + 'type': 'link', + 'title': "A mother-and-daughter team have developed what may be the world's first Alzheimer’s vaccine.", + 'caption': 'upworthy.com', + 'description': "Alzheimer's is a terrible disease that robs a person of their personality and memory before eventually leading to death. It's the sixth-largest killer in the U.S. 
and, currently, there are 5.8…", + 'message': 'This could be huge.', + 'expandedLinks': [{ + 'original': 'https://buff.ly/2MXnMOa', + 'expanded': 'https://www.upworthy.com/a-mother-and-daughter-team-have-developed-what-may-be-the-worlds-first-alzheimers-vaccine' + }], + 'link': 'https://buff.ly/2MXnMOa', + 'postUrl': 'https://www.facebook.com/Upworthy/posts/3473929489314551', + 'subscriberCount': 11752205, + 'score': 27.886194029850746, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d71a49b713b6d095d69ee08%2F92bcb6d0ada52001f20e776814129c522da2386c_883a50e22f1efb9063f4cb3154f04b2c7cd83e52_facebook&cfs=1&_nc_hash=AQDe8CpgkSIGZoni', + 'height': 600, + 'width': 600, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCoYDTqFI4MiK1i&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d71a49b713b6d095d69ee08%2F92bcb6d0ada52001f20e776814129c522da2386c_883a50e22f1efb9063f4cb3154f04b2c7cd83e52_facebook&_nc_hash=AQBfZm7y0NWkSc6X' + }], + 'statistics': { + 'actual': { + 'likeCount': 6491, + 'shareCount': 4238, + 'commentCount': 319, + 'loveCount': 1719, + 'wowCount': 2161, + 'hahaCount': 14, + 'sadCount': 4, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 281, + 'shareCount': 111, + 'commentCount': 48, + 'loveCount': 51, + 'wowCount': 14, + 'hahaCount': 15, + 'sadCount': 9, + 'angryCount': 7, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 3919, + 'name': 'Upworthy', + 'handle': 'Upworthy', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1914363_1176320005742189_4709951186905632219_n.png?_nc_cat=1&_nc_oc=AQlPiX5mYxZC_Xj8_M4a7JZZvCD27izvAXTMtobXrLjwA4S5Pel-CsMh5GMouHt8LNg&_nc_ht=scontent.xx&oh=ba4e0db7c2521356dc17108d8aa4a12a&oe=5E04D944', + 'subscriberCount': 11752205, + 'url': 'https://www.facebook.com/354522044588660', + 'platform': 'Facebook', + 'platformId': '354522044588660', + 'verified': True + } + }, { + 'id': 70165824138, + 'platformId': '140738092630206_2613271415376849', + 'platform': 'Facebook', + 'date': '2019-09-07 21:50:01', + 'updated': '2019-09-08 00:19:09', + 'type': 'link', + 'title': "Miley Cyrus vows to not have children till climate change is solved and fish are 'in the water'", + 'caption': 'theblaze.com', + 'description': "She wants any children to be able to 'live on an earth with fish in the water'", + 'message': 'She wants any children to be able to "live on an earth with fish in the water."', + 'expandedLinks': [{ + 'original': 'https://bit.ly/2Lxlp1f', + 'expanded': 'https://www.theblaze.com/news/miley-cyrus-vows-to-not-have-children-till-climate-change-is-solved-and-fish-are-in-the-water?utm_content=buffer95e36&utm_medium=organic&utm_source=facebook&utm_campaign=fb-theblaze' + }], + 'link': 'https://bit.ly/2Lxlp1f', + 'postUrl': 'https://www.facebook.com/TheBlaze/posts/2613271415376849', + 'subscriberCount': 2089159, + 'score': 17.958217270194986, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBP8jNPeMx4HEtr&w=720&h=720&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F19878079%252F1200x600.jpg%2F2000%252C2000%2FNZZZMKyAwJ5%252BbJLi%2Fimg.jpg&cfs=1&sx=217&sy=0&sw=1000&sh=1000&_nc_hash=AQAT_MAUTaHqDTM2', + 'height': 720, + 'width': 720, + 'full': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQAdLFFRmrWHlqEa&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F19878079%252F1200x600.jpg%2F2000%252C2000%2FNZZZMKyAwJ5%252BbJLi%2Fimg.jpg&_nc_hash=AQCY9HQEogoRJVy4' + }], + 'statistics': { + 'actual': { + 'likeCount': 747, + 'shareCount': 679, + 'commentCount': 2277, + 'loveCount': 158, + 'wowCount': 63, + 'hahaCount': 2476, + 'sadCount': 17, + 'angryCount': 30, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 65, + 'shareCount': 65, + 'commentCount': 92, + 'loveCount': 4, + 'wowCount': 17, + 'hahaCount': 43, + 'sadCount': 8, + 'angryCount': 65, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6892, + 'name': 'TheBlaze', + 'handle': 'TheBlaze', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/47350623_2141870595850269_7864140219111440384_n.png?_nc_cat=1&_nc_oc=AQmGyVQswjmmaInAkgMKbLJ62jAcb2BShbL78435-MqCEBLedhKr7VO97Nzxt2x220k&_nc_ht=scontent.xx&oh=4a5ce0b44b6400aab9bb78aa2afdee87&oe=5E011864', + 'subscriberCount': 2089166, + 'url': 'https://www.facebook.com/140738092630206', + 'platform': 'Facebook', + 'platformId': '140738092630206', + 'verified': True + } + }, { + 'id': 70158921605, + 'platformId': '40656699159_10156974744419160', + 'platform': 'Facebook', + 'date': '2019-09-07 19:30:02', + 'updated': '2019-09-08 00:30:26', + 'type': 'link', + 'title': "Beto: Americans Will Willingly Surrender Guns — It'll Be The Law", + 'caption': 'washingtonexaminer.com', + 'description': 'MANCHESTER, New Hampshire — Former Texas Democratic Rep. Beto O’Rourke, a White House presidential contender, confirmed Saturday at the New Hampshire Democratic Party Convention that his mandatory firearm buyback plan would not include law enforcement door knocks.', + 'message': 'Would you comply with a mandatory buyback program?', + 'expandedLinks': [{ + 'original': 'https://washex.am/2N0eq4j', + 'expanded': 'https://www.washingtonexaminer.com/news/beto-orourke-i-dont-see-the-policemen-going-door-to-door-with-my-mandatory-gun-buyback-plan' + }], + 'link': 'https://washex.am/2N0eq4j', + 'postUrl': 'https://www.facebook.com/WashingtonExaminer/posts/10156974744419160', + 'subscriberCount': 714637, + 'score': 17.656521739130437, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAKuUJV91AX25Yb&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5c42324500c63f68be6da5c2%2F5d73fa34e6dd180f722ad479%2Fb963a3b05ef838a45f688712f3d863f2.original.jpg&cfs=1&_nc_hash=AQBZqcfcgz2rRjAX', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDyJATdiB8d3Y9i&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5c42324500c63f68be6da5c2%2F5d73fa34e6dd180f722ad479%2Fb963a3b05ef838a45f688712f3d863f2.original.jpg&_nc_hash=AQALGOQFLGNI7dm-' + }], + 'statistics': { + 'actual': { + 'likeCount': 122, + 'shareCount': 758, + 'commentCount': 1595, + 'loveCount': 5, + 'wowCount': 15, + 'hahaCount': 1024, + 'sadCount': 7, + 'angryCount': 535, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 65, + 'shareCount': 40, + 'commentCount': 62, + 'loveCount': 7, + 'wowCount': 7, + 'hahaCount': 19, + 'sadCount': 3, + 'angryCount': 27, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13991, + 'name': 'Washington Examiner', + 'handle': 'WashingtonExaminer', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/36928610_10156017618514160_6905131952433528832_n.jpg?_nc_cat=111&_nc_oc=AQnKuEJBvxlMgc-zQHzSfEtsgFfHehn1pucacRbqrYlmmQp69EGwogOuyEUo-OV8OWM&_nc_ht=scontent.xx&oh=88b1063a5362110cc87fb9d6caedea35&oe=5DFE6885', + 'subscriberCount': 714626, + 'url': 'https://www.facebook.com/40656699159', + 'platform': 'Facebook', + 'platformId': '40656699159', + 'verified': True + } + }, { + 'id': 70175854512, + 'platformId': '182919686769_10156515300916770', + 'platform': 'Facebook', + 'date': '2019-09-08 00:00:12', + 'updated': '2019-09-08 00:40:44', + 'type': 'link', + 'title': 'Chicago Mayor Blames Republicans For City’s Violence', + 'caption': 'dailycaller.com', + 'description': "'Keep our name out of your mouth'", + 'message': 'Craziness in Chicago...', + 'expandedLinks': [{ + 'original': 'https://dailycaller.com/2019/09/03/chicago-mayor-blames-republicans-gun-violence/', + 'expanded': 'https://dailycaller.com/2019/09/03/chicago-mayor-blames-republicans-gun-violence/' + }], + 'link': 'https://dailycaller.com/2019/09/03/chicago-mayor-blames-republicans-gun-violence/', + 'postUrl': 'https://www.facebook.com/DailyCaller/posts/10156515300916770', + 'subscriberCount': 5408115, + 'score': 17.35958904109589, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQA54EEroC3nEHay&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f06e2545380e7440c885%2F33d0f3248d5c22cd6f7692bf469ed35a5ab97f26_fb9b8998b7e16da2c675a1ab25b4b260d8c0dcc2_facebook&cfs=1&_nc_hash=AQDnUYQYto-6jboY', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDc5HzVX1lJnosX&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f06e2545380e7440c885%2F33d0f3248d5c22cd6f7692bf469ed35a5ab97f26_fb9b8998b7e16da2c675a1ab25b4b260d8c0dcc2_facebook&_nc_hash=AQAQYgQAvvwFmxWw' + }], + 'statistics': { + 'actual': { + 'likeCount': 232, + 'shareCount': 776, + 'commentCount': 1586, + 'loveCount': 4, + 'wowCount': 88, + 'hahaCount': 1653, + 'sadCount': 41, + 'angryCount': 689, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 83, + 'shareCount': 39, + 'commentCount': 79, + 'loveCount': 7, + 'wowCount': 11, + 'hahaCount': 40, + 'sadCount': 5, + 'angryCount': 28, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13489, + 'name': 'The Daily Caller', + 'handle': 'DailyCaller', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471', + 'subscriberCount': 5408115, + 'url': 'https://www.facebook.com/182919686769', + 'platform': 'Facebook', + 'platformId': '182919686769', + 'verified': True + } + }, { + 'id': 70158922017, + 'platformId': '268914272540_10156464022182541', + 'platform': 'Facebook', + 'date': '2019-09-07 19:20:05', + 'updated': '2019-09-08 00:30:46', + 'type': 'link', + 'title': 'Michigan trophy hunter who paid $400,000 to hunt rare black rhino allowed to import its horns, skin and skull to America', + 'caption': 'nydailynews.com', + 'description': ' ', + 'message': 'A Michigan man who shelled out $400K to hunt and kill a rare black rhinoceros in Africa last year will be allowed to import its skin, skull and horns to America.', + 'expandedLinks': [{ + 'original': 'https://trib.al/MQPSeDX', + 'expanded': 
'https://www.nydailynews.com/news/national/ny-trophy-hunter-rare-black-rhino-permit-horns-skin-skull-20190907-yq7z3q3dlvhrxf2qam2iuydz2m-story.html?fbclid=IwAR1clOge0lIFgq-sBiUrOisjtvPs7gnFJFu2ci6Sh_cHpfUiQ1pU_wSjuFk' + }], + 'link': 'https://trib.al/MQPSeDX', + 'postUrl': 'https://www.facebook.com/NYDailyNews/posts/10156464022182541', + 'subscriberCount': 3119682, + 'score': 14.468468468468469, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBl8MfHRPDkK0dp&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F7mCUiJ_3DITEHCIaQCpKbrw0Rxo%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FL4OKZAE3QBANLGI7XZ25UQXW2Q.jpg&cfs=1&_nc_hash=AQAWKenM6Rp9QV0U', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBkqItNryNEh7Am&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F7mCUiJ_3DITEHCIaQCpKbrw0Rxo%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FL4OKZAE3QBANLGI7XZ25UQXW2Q.jpg&_nc_hash=AQDuG8_P3XVgFrCd' + }], + 'statistics': { + 'actual': { + 'likeCount': 82, + 'shareCount': 326, + 'commentCount': 212, + 'loveCount': 8, + 'wowCount': 24, + 'hahaCount': 2, + 'sadCount': 68, + 'angryCount': 884, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 28, + 'shareCount': 26, + 'commentCount': 17, + 'loveCount': 4, + 'wowCount': 9, + 'hahaCount': 9, + 'sadCount': 8, + 'angryCount': 10, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 18752, + 'name': 'New York Daily News', + 'handle': 'NYDailyNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1', + 'subscriberCount': 3120017, + 'url': 'https://www.facebook.com/268914272540', + 'platform': 'Facebook', + 'platformId': '268914272540', + 'verified': True + } + }, { + 'id': 70172878599, + 'platformId': '210277954204_10156789911629205', + 'platform': 'Facebook', + 'date': '2019-09-07 23:41:12', + 'updated': '2019-09-08 00:39:40', + 'type': 'youtube', + 'caption': 'youtube.com', + 'description': 'San Francisco took a symbolic vote to declare the NRA a terrorist organization. Cenk Uygur and Ana Kasparian, hosts of The Young Turks, break it down. 
MORE T...', + 'expandedLinks': [{ + 'original': 'https://www.youtube.com/watch?v=-M53X2IrQes&feature=youtu.be', + 'expanded': 'https://www.youtube.com/watch?v=-M53X2IrQes&feature=youtu.be' + }], + 'link': 'https://www.youtube.com/watch?v=-M53X2IrQes&feature=youtu.be', + 'postUrl': 'https://www.facebook.com/TheYoungTurks/posts/10156789911629205', + 'subscriberCount': 2099948, + 'score': 13.402985074626866, + 'media': [{ + 'type': 'video', + 'url': 'https://www.youtube.com/embed/-M53X2IrQes?autoplay=1', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD0ftgbO2TXPuL-&w=720&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2F-M53X2IrQes%2Fmaxresdefault.jpg&cfs=1&_nc_hash=AQAJe0Z3lWqtjAjc', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBt2WjZ1yQI3VUu&w=1280&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2F-M53X2IrQes%2Fmaxresdefault.jpg&crop&_nc_hash=AQDnZ7LCi31UOQos' + }], + 'statistics': { + 'actual': { + 'likeCount': 526, + 'shareCount': 92, + 'commentCount': 79, + 'loveCount': 140, + 'wowCount': 13, + 'hahaCount': 43, + 'sadCount': 0, + 'angryCount': 5, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 19, + 'shareCount': 10, + 'commentCount': 11, + 'loveCount': 5, + 'wowCount': 2, + 'hahaCount': 11, + 'sadCount': 3, + 'angryCount': 6, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6786, + 'name': 'The Young Turks', + 'handle': 'TheYoungTurks', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5', + 'subscriberCount': 2100186, + 'url': 'https://www.facebook.com/210277954204', + 'platform': 'Facebook', + 'platformId': '210277954204', + 'verified': True + } + }, { + 'id': 70175943871, + 'platformId': '5550296508_10159878537516509', + 'platform': 'Facebook', + 'date': '2019-09-08 00:33:39', + 'updated': '2019-09-08 00:41:59', + 'type': 'link', + 'title': "Alex Trebek is done with chemotherapy and back at work on 'Jeopardy!'", + 'caption': 'cnn.com', + 'description': ' ', + 'message': 'This beloved host of a long-running trivia show is back at work', + 'expandedLinks': [{ + 'original': 'https://cnn.it/34xMv0O', + 'expanded': 'https://www.cnn.com/2019/08/29/media/alex-trebek-chemo-jeopardy/index.html?utm_source=fbCNN&utm_content=2019-09-08T00%3A33%3A35&utm_term=link&utm_medium=social' + }], + 'link': 'https://cnn.it/34xMv0O', + 'postUrl': 'https://www.facebook.com/cnn/posts/10159878537516509', + 'subscriberCount': 31389797, + 'score': 13.347058823529412, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCd0-Oc1w-PqqN0&w=619&h=619&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190308154523-alex-trebek-super-tease.jpg&cfs=1&sx=8&sy=0&sw=619&sh=619&_nc_hash=AQAQt7q9Fv_Md2Ab', + 'height': 619, + 'width': 619, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBLf0lDqhzVJPlY&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190308154523-alex-trebek-super-tease.jpg&_nc_hash=AQAKZjUbHBnq8Pyp' + }], + 'statistics': { + 'actual': { + 'likeCount': 1439, + 'shareCount': 196, + 'commentCount': 72, + 'loveCount': 543, + 'wowCount': 18, + 'hahaCount': 0, + 'sadCount': 0, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 60, + 'shareCount': 26, + 'commentCount': 33, + 'loveCount': 7, + 'wowCount': 14, + 
'hahaCount': 8, + 'sadCount': 14, + 'angryCount': 8, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 8323, + 'name': 'CNN', + 'handle': 'cnn', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12289622_10154246192721509_1897912583584847639_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnmWKpivHkplQlHvH6RU7ER1noSOq6saypKUuDbSnV0FWNYEYghmJGPxBpmhJO8UsU&_nc_ht=scontent.xx&oh=12e2b35de35132a27c2772d3fe565936&oe=5DF3AC02', + 'subscriberCount': 31389797, + 'url': 'https://www.facebook.com/5550296508', + 'platform': 'Facebook', + 'platformId': '5550296508', + 'verified': True + } + }, { + 'id': 70159872429, + 'platformId': '341163402640457_2815563945200378', + 'platform': 'Facebook', + 'date': '2019-09-07 19:45:00', + 'updated': '2019-09-08 00:41:28', + 'type': 'native_video', + 'description': 'Police repeatedly punched this Black teen for ‘resisting’ — but new bodycam footage shows otherwise', + 'message': 'This teen is suing for negligence and emotional distress after police repeatedly punched him', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/NowThisPolitics/videos/513652776066740/', + 'expanded': 'https://www.facebook.com/NowThisPolitics/videos/513652776066740/' + }], + 'link': 'https://www.facebook.com/NowThisPolitics/videos/513652776066740/', + 'postUrl': 'https://www.facebook.com/NowThisNews/posts/2815563945200378', + 'subscriberCount': 14557547, + 'score': 12.941538461538462, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/70744112_493008498145296_4014857848306532352_n.mp4?_nc_cat=106&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQnIjFMc409QS5XESkpl6rChSVLmMz0ebAUqlNTlw5C-uk-7vc_noBr8hKrDpzFaj5A&_nc_ht=video.xx&oh=37fcc40c0b8fe5fe3400541ec2a6577e&oe=5D75970B', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67896250_513654266066591_881517186823225344_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnMPVnf-2Fgr7r1vVtMtspa6BCIOBZRHodyW1NsVAyGNpoYMGSfP_4aKH_6qHkZ6-c&_nc_ht=scontent.xx&oh=f48f82b31a66fcef783579b4673d16df&oe=5E023A0D', + 'height': 720, + 'width': 720, + 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/67896250_513654266066591_881517186823225344_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnMPVnf-2Fgr7r1vVtMtspa6BCIOBZRHodyW1NsVAyGNpoYMGSfP_4aKH_6qHkZ6-c&_nc_ht=scontent.xx&oh=5aebdedebaed77988b7fa660957310ec&oe=5DF6E6A6' + }], + 'statistics': { + 'actual': { + 'likeCount': 838, + 'shareCount': 2297, + 'commentCount': 1081, + 'loveCount': 10, + 'wowCount': 245, + 'hahaCount': 36, + 'sadCount': 567, + 'angryCount': 3338, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 279, + 'shareCount': 175, + 'commentCount': 86, + 'loveCount': 30, + 'wowCount': 23, + 'hahaCount': 16, + 'sadCount': 27, + 'angryCount': 14, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 10247, + 'name': 'NowThis', + 'handle': 'NowThisNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28379313_1840609126029203_6405012222846484702_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkFdmIYy2uPLXX0xb7b7uQjQ-yiayvSBaPWqSlby_pCoW_1_Iybmu7xSmUb-UMr1gc&_nc_ht=scontent.xx&oh=add01854d7218f79e9aad6351846e535&oe=5E0CA890', + 'subscriberCount': 14558656, + 'url': 'https://www.facebook.com/341163402640457', + 'platform': 'Facebook', + 'platformId': '341163402640457', + 'verified': True + }, + 'videoLengthMS': 136297 + }, { + 'id': 70158620099, + 'platformId': '273864989376427_2989862727776626', + 'platform': 'Facebook', + 'date': '2019-09-07 19:19:07', + 'updated': '2019-09-08 00:44:08', 
+ 'type': 'link', + 'title': 'Rep. Cleaver: Trump has turned presidency into ‘ATM machine’', + 'caption': 'msnbc.com', + 'description': 'POLITICO reports that an Air National Guard crew stopped overnight at President Trump’s resort in Scotland – and this layover is now being investigated by the House Oversight Committee. Rep. Emanuel Cleaver (D-MO) joins Alex Witt discuss this investigation into whether U.S. military spending has...', + 'message': 'Rep. Cleaver says if report that military spending benefited Trump resort is true, "then Donald Trump has achieved something that I don’t think any president in our history has achieved or even attempted to achieve, and that is to corrupt the military."', + 'expandedLinks': [{ + 'original': 'https://on.msnbc.com/34vvmoJ', + 'expanded': 'https://www.msnbc.com/weekends-with-alex-witt/watch/rep-cleaver-trump-has-turned-presidency-into-atm-machine-68436037881?cid=sm_npd_ms_fb_ma' + }], + 'link': 'https://on.msnbc.com/34vvmoJ', + 'postUrl': 'https://www.facebook.com/msnbc/posts/2989862727776626', + 'subscriberCount': 2290452, + 'score': 12.633689839572192, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAQ_4ojLTZ1pFrN&w=630&h=630&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_witt_EmanuelCleaver_TrumpScotland_190907_1920x1080.nbcnews-fp-1200-630.jpg&cfs=1&sx=293&sy=0&sw=630&sh=630&_nc_hash=AQDt4JYiv6yr-ARF', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAF68YIIFGWkOMU&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_witt_EmanuelCleaver_TrumpScotland_190907_1920x1080.nbcnews-fp-1200-630.jpg&_nc_hash=AQAEszyHppWr9rkP' + }], + 'statistics': { + 'actual': { + 'likeCount': 1339, + 'shareCount': 1498, + 'commentCount': 425, + 'loveCount': 18, + 'wowCount': 75, + 'hahaCount': 62, + 'sadCount': 405, + 'angryCount': 903, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 88, + 'shareCount': 57, + 'commentCount': 95, + 'loveCount': 8, + 'wowCount': 15, + 'hahaCount': 31, + 'sadCount': 18, + 'angryCount': 62, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 8324, + 'name': 'MSNBC', + 'handle': 'msnbc', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2', + 'subscriberCount': 2290512, + 'url': 'https://www.facebook.com/273864989376427', + 'platform': 'Facebook', + 'platformId': '273864989376427', + 'verified': True + } + }, { + 'id': 70168994896, + 'platformId': '114050161948682_2662947170392289', + 'platform': 'Facebook', + 'date': '2019-09-07 22:35:54', + 'updated': '2019-09-08 00:45:04', + 'type': 'link', + 'title': 'Albertsons Companies joins supermarkets in changing guns policy', + 'caption': 'reuters.com', + 'description': ' ', + 'message': 'Supermarket operator Albertsons Companies said on Saturday it would ask customers not to openly carry firearms at its stores, joining an array of retailers and store chains this week who changed their gun policy in light of several mass shootings in the United States.', + 'expandedLinks': [{ + 'original': 'https://www.reuters.com/article/us-albertsons-guncontrol-idUSKCN1VS0QF?utm_campaign=trueAnthem%3A+Trending+Content&utm_content=5d7430b5145a57000153fe31&utm_medium=trueAnthem&utm_source=facebook', + 'expanded': 
'https://www.reuters.com/article/us-albertsons-guncontrol-idUSKCN1VS0QF?utm_campaign=trueAnthem%3A+Trending+Content&utm_content=5d7430b5145a57000153fe31&utm_medium=trueAnthem&utm_source=facebook' + }], + 'link': 'https://www.reuters.com/article/us-albertsons-guncontrol-idUSKCN1VS0QF?utm_campaign=trueAnthem%3A+Trending+Content&utm_content=5d7430b5145a57000153fe31&utm_medium=trueAnthem&utm_source=facebook', + 'postUrl': 'https://www.facebook.com/Reuters/posts/2662947170392289', + 'subscriberCount': 4154272, + 'score': 12.384615384615385, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQB-pWZOAddTH2Dh&w=720&h=720&url=https%3A%2F%2Fs2.reutersmedia.net%2Fresources%2Fr%2F%3Fm%3D02%26d%3D20190907%26t%3D2%26i%3D1427373205%26w%3D1200%26r%3DLYNXNPEF8610E&cfs=1&_nc_hash=AQCPVsKpXiRqvauR', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAwf7DJd7fL-VnI&url=https%3A%2F%2Fs2.reutersmedia.net%2Fresources%2Fr%2F%3Fm%3D02%26d%3D20190907%26t%3D2%26i%3D1427373205%26w%3D1200%26r%3DLYNXNPEF8610E&_nc_hash=AQDhYVtqc1GGWjgO' + }], + 'statistics': { + 'actual': { + 'likeCount': 512, + 'shareCount': 74, + 'commentCount': 87, + 'loveCount': 109, + 'wowCount': 4, + 'hahaCount': 11, + 'sadCount': 0, + 'angryCount': 8, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 26, + 'shareCount': 8, + 'commentCount': 9, + 'loveCount': 4, + 'wowCount': 3, + 'hahaCount': 6, + 'sadCount': 5, + 'angryCount': 4, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 10323, + 'name': 'Reuters', + 'handle': 'Reuters', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/51325614_2292147310805612_3874403780548100096_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlLN3v5RKOKT6LVQj--bvulAczkWupv1AuwaG14c3MkOAyF9oLoLGad6n1Rl6FhN6k&_nc_ht=scontent.xx&oh=73deaf953fbb14e82a9c92b2f850db23&oe=5E0ACADC', + 'subscriberCount': 4154522, + 'url': 'https://www.facebook.com/114050161948682', + 'platform': 'Facebook', + 'platformId': '114050161948682', + 'verified': True + } + }, { + 'id': 70157942853, + 'platformId': '532854420074062_3747993908560081', + 'platform': 'Facebook', + 'date': '2019-09-07 19:03:00', + 'updated': '2019-09-08 00:43:19', + 'type': 'link', + 'title': 'High Schooler Penalized 15 Yards for Praying in the End Zone', + 'caption': 'fanbuzz.com', + 'description': 'The list of things you are allowed to do after scoring a touchdown has gotten very short, particularly in high school. 
Elaborate celebrations have never been allowed at the high school or college l…', + 'message': 'WOW 😡', + 'expandedLinks': [{ + 'original': 'https://fanbuzz.com/national/high-schooler-penalized-15-yards-for-praying-in-the-endzone/?utm_source=facebook&utm_medium=agora&utm_term=faves&utm_campaign=faves', + 'expanded': 'https://fanbuzz.com/national/high-schooler-penalized-15-yards-for-praying-in-the-endzone/?utm_source=facebook&utm_medium=agora&utm_term=faves&utm_campaign=faves' + }], + 'link': 'https://fanbuzz.com/national/high-schooler-penalized-15-yards-for-praying-in-the-endzone/?utm_source=facebook&utm_medium=agora&utm_term=faves&utm_campaign=faves', + 'postUrl': 'https://www.facebook.com/thefavesusa/posts/3747993908560081', + 'subscriberCount': 6323442, + 'score': 12.014760147601477, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD-IQrKwhdvG3JM&w=720&h=720&url=http%3A%2F%2Ffansrule.files.wordpress.com%2F2014%2F11%2Fknee.png%3Fw%3D1200%26h%3D627%26crop%3D1&cfs=1&_nc_hash=AQDrhZ6u7MuQyOka', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBBj0aMdnWPQ4KN&url=http%3A%2F%2Ffansrule.files.wordpress.com%2F2014%2F11%2Fknee.png%3Fw%3D1200%26h%3D627%26crop%3D1&_nc_hash=AQBbKfxbS6qdnJSt' + }], + 'statistics': { + 'actual': { + 'likeCount': 188, + 'shareCount': 885, + 'commentCount': 580, + 'loveCount': 5, + 'wowCount': 79, + 'hahaCount': 9, + 'sadCount': 257, + 'angryCount': 1253, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 64, + 'shareCount': 87, + 'commentCount': 52, + 'loveCount': 7, + 'wowCount': 15, + 'hahaCount': 11, + 'sadCount': 16, + 'angryCount': 19, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 48728, + 'name': 'Faves USA', + 'handle': 'thefavesusa', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A', + 'subscriberCount': 6323373, + 'url': 'https://www.facebook.com/532854420074062', + 'platform': 'Facebook', + 'platformId': '532854420074062', + 'verified': True + } + }, { + 'id': 70166547836, + 'platformId': '908009612563863_2986545271376943', + 'platform': 'Facebook', + 'date': '2019-09-07 22:00:27', + 'updated': '2019-09-08 00:48:09', + 'type': 'native_video', + 'message': '‘You ask a lot of stupid questions.’ — Pres. 
Trump has made a pastime out of attacking women journalists of color', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/NowThisPolitics/videos/818841295179184/', + 'expanded': 'https://www.facebook.com/NowThisPolitics/videos/818841295179184/' + }], + 'link': 'https://www.facebook.com/NowThisPolitics/videos/818841295179184/', + 'postUrl': 'https://www.facebook.com/NowThisPolitics/posts/2986545271376943', + 'subscriberCount': 6074083, + 'score': 11.583710407239819, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/10000000_651586571919863_4335461527566942208_n.mp4?_nc_cat=111&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQmNgZ66SHVdQFXtzxrzGHoWaVIlMDoLBwmnY9N9W4xHk1wkDz96S-h-1nZoDLC-MbM&_nc_ht=video.xx&oh=033f5207ca6ec2f6941f99b21660a396&oe=5D759CBE', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67128103_793076731088974_6383828119333109760_n.jpg?_nc_cat=103&_nc_log=1&_nc_oc=AQl0V9WmrSLlnMwSjWBceYUVZflzfeIB4hjLR_IsJS-oRteBnkFurlOVv2cB2Cug7ak&_nc_ht=scontent.xx&oh=788fcd3a1948fe3b524810415b0f86a2&oe=5DC90F1E', + 'height': 720, + 'width': 720, + 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/67128103_793076731088974_6383828119333109760_n.jpg?_nc_cat=103&_nc_log=1&_nc_oc=AQl0V9WmrSLlnMwSjWBceYUVZflzfeIB4hjLR_IsJS-oRteBnkFurlOVv2cB2Cug7ak&_nc_ht=scontent.xx&oh=9dee08bdf149fa35842ba7ac4113fccc&oe=5E0ACC45' + }], + 'statistics': { + 'actual': { + 'likeCount': 679, + 'shareCount': 1545, + 'commentCount': 1931, + 'loveCount': 27, + 'wowCount': 132, + 'hahaCount': 254, + 'sadCount': 169, + 'angryCount': 2943, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 239, + 'shareCount': 196, + 'commentCount': 103, + 'loveCount': 27, + 'wowCount': 19, + 'hahaCount': 24, + 'sadCount': 28, + 'angryCount': 27, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 311636, + 'name': 'NowThis Politics', + 'handle': 'NowThisPolitics', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28276603_1939096412788506_2850422809072819205_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlBSULvu9xr5smvB3kmRub5MfL3SpyPxNX94GEyc5skmb19swOR40nthDv1Kip3kcw&_nc_ht=scontent.xx&oh=b734d3faa39291c805198e3ad7de3450&oe=5DFF0890', + 'subscriberCount': 6074746, + 'url': 'https://www.facebook.com/908009612563863', + 'platform': 'Facebook', + 'platformId': '908009612563863', + 'verified': True + }, + 'videoLengthMS': 216467 + }, { + 'id': 70167955465, + 'platformId': '20446254070_10156890634939071', + 'platform': 'Facebook', + 'date': '2019-09-07 22:20:02', + 'updated': '2019-09-08 00:42:26', + 'type': 'link', + 'title': '19-year-old Canadian Bianca Andreescu defeats Serena Williams in straight sets in the US Open final', + 'caption': 'businessinsider.com', + 'description': "Andreescu's victory Saturday prevented Williams from claiming what would have been her record-tying 24th major singles championship.", + 'message': 'Bianca Andreescu built a big lead and then held on to upset Serena Williams 6-3, 7-5.', + 'expandedLinks': [{ + 'original': 'https://bit.ly/2UCsgL8', + 'expanded': 'https://www.businessinsider.com/bianca-andreescu-defeats-serena-williams-us-open-final-2019-9?utm_content=buffer39cb2&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer-bi' + }], + 'link': 'https://bit.ly/2UCsgL8', + 'postUrl': 'https://www.facebook.com/businessinsider/posts/10156890634939071', + 'subscriberCount': 9107012, + 'score': 11.270833333333334, + 'media': [{ + 'type': 'photo', + 'url': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQBGfkqTVz4UilE_&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d742c27e06cfa1a6c0dd357%2F090aaece09f13ab3d249fb01a6b03fe1d0a77696_a0619c160e7449d5962096e9723c12db15abc024_facebook&cfs=1&_nc_hash=AQDohz4x58mNUoe7', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDRg-pPDUTgXpv6&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d742c27e06cfa1a6c0dd357%2F090aaece09f13ab3d249fb01a6b03fe1d0a77696_a0619c160e7449d5962096e9723c12db15abc024_facebook&_nc_hash=AQCdrW99_j9Evkv4' + }], + 'statistics': { + 'actual': { + 'likeCount': 360, + 'shareCount': 48, + 'commentCount': 34, + 'loveCount': 48, + 'wowCount': 41, + 'hahaCount': 3, + 'sadCount': 6, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 12, + 'shareCount': 6, + 'commentCount': 6, + 'loveCount': 2, + 'wowCount': 3, + 'hahaCount': 7, + 'sadCount': 5, + 'angryCount': 7, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6648, + 'name': 'Business Insider', + 'handle': 'businessinsider', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9', + 'subscriberCount': 9107575, + 'url': 'https://www.facebook.com/20446254070', + 'platform': 'Facebook', + 'platformId': '20446254070', + 'verified': True + } + }, { + 'id': 70163422198, + 'platformId': '532854420074062_3748273735198765', + 'platform': 'Facebook', + 'date': '2019-09-07 21:02:32', + 'updated': '2019-09-08 00:43:19', + 'type': 'native_video', + 'description': 'Just chilling out... 
😂😍', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/VT/videos/327087418106099/', + 'expanded': 'https://www.facebook.com/VT/videos/327087418106099/' + }], + 'link': 'https://www.facebook.com/VT/videos/327087418106099/', + 'postUrl': 'https://www.facebook.com/thefavesusa/posts/3748273735198765', + 'subscriberCount': 6323442, + 'score': 11.15549597855228, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/46221677_357208778367365_7578121201361354752_n.mp4?_nc_cat=108&efg=eyJybHIiOjMxMywicmxhIjo1MTIsInZlbmNvZGVfdGFnIjoic3ZlX3NkIn0%3D&_nc_oc=AQnOjrn8FDX2fxGFfKnGJZqWUFAWlA_cvTOIuOBt9OOmY-usgi8QZRiJx2scwnwUMdU&rl=313&vabr=174&_nc_ht=video.xx&oh=6f26a3ba7feed19e5a13b63b659afa4e&oe=5D75B702', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/s720x720/45627901_327088121439362_6904272001196621824_n.jpg?_nc_cat=1&_nc_oc=AQn6h63uzCboFTqDkVSVhwjW_rs_dZ6IZACYblps27vlp0Upu5U471nUelf5p_T-pQE&_nc_ht=scontent.xx&oh=fff7a12b0243ecc92c367ac0139f03bc&oe=5E1112A4', + 'height': 720, + 'width': 405, + 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/45627901_327088121439362_6904272001196621824_n.jpg?_nc_cat=1&_nc_oc=AQn6h63uzCboFTqDkVSVhwjW_rs_dZ6IZACYblps27vlp0Upu5U471nUelf5p_T-pQE&_nc_ht=scontent.xx&oh=d272a3fbde01b1f176662957b8461ead&oe=5E00F6EC' + }], + 'statistics': { + 'actual': { + 'likeCount': 3164, + 'shareCount': 3183, + 'commentCount': 234, + 'loveCount': 893, + 'wowCount': 19, + 'hahaCount': 828, + 'sadCount': 0, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 337, + 'shareCount': 250, + 'commentCount': 67, + 'loveCount': 32, + 'wowCount': 7, + 'hahaCount': 46, + 'sadCount': 4, + 'angryCount': 3, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 48728, + 'name': 'Faves USA', + 'handle': 'thefavesusa', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A', + 'subscriberCount': 6323373, + 'url': 'https://www.facebook.com/532854420074062', + 'platform': 'Facebook', + 'platformId': '532854420074062', + 'verified': True + }, + 'videoLengthMS': 34238 + }, { + 'id': 70161065649, + 'platformId': '172526489431467_3227961787221240', + 'platform': 'Facebook', + 'date': '2019-09-07 20:00:08', + 'updated': '2019-09-08 00:24:24', + 'type': 'link', + 'title': 'Rob Reiner Declares: ‘The Impeachment Process Is About To Begin’ | Tea Party', + 'caption': 'teaparty.org', + 'description': 'Rob Reiner Declares: ‘The Impeachment Process Is About To Begin’ (Breitbart) – Hollywood actor-director Rob Reiner took to Twitter on Saturday and declared that “the impeachment process is about to begin,” against President Donald Trump. “The Impeachment process is about to begin. 
The ar...', + 'message': 'Rob Reiner Declares: ‘The Impeachment Process Is About To Begin’', + 'expandedLinks': [{ + 'original': 'http://ow.ly/thAc30puHYx', + 'expanded': 'https://www.teaparty.org/rob-reiner-declares-the-impeachment-process-is-about-to-begin-408805/' + }], + 'link': 'http://ow.ly/thAc30puHYx', + 'postUrl': 'https://www.facebook.com/teapartyorg/posts/3227961787221240', + 'subscriberCount': 416823, + 'score': 11.11111111111111, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQB-IgFpD1LsDy90&w=720&h=720&url=https%3A%2F%2Fwww.teaparty.org%2Fwp-content%2Fuploads%2F2019%2F09%2Freiner-new.jpg&cfs=1&_nc_hash=AQDAvJ6ay40Yyv9K', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBEtc-nGN1pXKKM&url=https%3A%2F%2Fwww.teaparty.org%2Fwp-content%2Fuploads%2F2019%2F09%2Freiner-new.jpg&_nc_hash=AQDV8GfnZCDBS9dx' + }], + 'statistics': { + 'actual': { + 'likeCount': 8, + 'shareCount': 15, + 'commentCount': 194, + 'loveCount': 0, + 'wowCount': 1, + 'hahaCount': 134, + 'sadCount': 1, + 'angryCount': 47, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 6, + 'shareCount': 5, + 'commentCount': 7, + 'loveCount': 2, + 'wowCount': 2, + 'hahaCount': 5, + 'sadCount': 1, + 'angryCount': 8, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 370587, + 'name': 'Tea Party', + 'handle': 'teapartyorg', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10645152_1119529788064461_6831324369519464936_n.png?_nc_cat=109&_nc_oc=AQlYPwkxXVKsTPXKN2iEw6-kekm3w1t-TNKlGRez6lg5WNmUCadSHtPr1aKi7-vMXx0&_nc_ht=scontent.xx&oh=47cea2dd8d0821de871a1f427d4cc9c3&oe=5E051C4C', + 'subscriberCount': 416797, + 'url': 'https://www.facebook.com/172526489431467', + 'platform': 'Facebook', + 'platformId': '172526489431467', + 'verified': True + } + }, { + 'id': 70157545859, + 'platformId': '131459315949_10157157161525950', + 'platform': 'Facebook', + 'date': '2019-09-07 18:59:47', + 'updated': '2019-09-08 00:28:55', + 'type': 'video', + 'caption': 'cbsnews.com', + 'description': ' ', + 'message': 'President Trump has been slamming the media for the coverage of his claim that Alabama was in grave danger from Hurricane Dorian.', + 'expandedLinks': [{ + 'original': 'https://cbsn.ws/300OGXC', + 'expanded': 'https://www.cbsnews.com/news/noaa-backs-up-president-trump-claim-that-alabama-could-be-affected-by-hurricane-2019-09-07/?ftag=CNM-00-10aab6a&linkId=73317898' + }], + 'link': 'https://cbsn.ws/300OGXC', + 'postUrl': 'https://www.facebook.com/CBSNews/posts/10157157161525950', + 'subscriberCount': 5892543, + 'score': 11.091743119266056, + 'media': [{ + 'type': 'video', + 'url': 'https://public.vilynx.com/direct/8fc31712de713e0c34c55c4bce033614/dfa46269-dee8-47d5-a4d2-75d7e30ed087/pro69.viwindow.mp4', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCbyqBM5kDSW7D5&w=630&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F08%2F29%2F0a29d008-ae46-4e99-b300-93e27828e182%2Fthumbnail%2F1200x630%2F40ae14190bd7d89123364faa648de9c9%2Fgettyimages-1164696989.jpg&cfs=1&sx=323&sy=0&sw=630&sh=630&_nc_hash=AQBfp4rDK-YSkYIB', + 'height': 630, + 'width': 630, + 'full': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQBoZQPh51tH66xT&w=1200&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F08%2F29%2F0a29d008-ae46-4e99-b300-93e27828e182%2Fthumbnail%2F1200x630%2F40ae14190bd7d89123364faa648de9c9%2Fgettyimages-1164696989.jpg&crop&sx=0&sy=0&sw=1200&sh=630&_nc_hash=AQDhDDBJSu3_Lwws' + }], + 'statistics': { + 'actual': { + 'likeCount': 362, + 'shareCount': 160, + 'commentCount': 710, + 'loveCount': 28, + 'wowCount': 33, + 'hahaCount': 674, + 'sadCount': 35, + 'angryCount': 416, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 76, + 'shareCount': 33, + 'commentCount': 46, + 'loveCount': 8, + 'wowCount': 11, + 'hahaCount': 19, + 'sadCount': 16, + 'angryCount': 9, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 14655, + 'name': 'CBS News', + 'handle': 'CBSNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB', + 'subscriberCount': 5892766, + 'url': 'https://www.facebook.com/131459315949', + 'platform': 'Facebook', + 'platformId': '131459315949', + 'verified': True + } + }, { + 'id': 70174483693, + 'platformId': '273864989376427_2990274061068826', + 'platform': 'Facebook', + 'date': '2019-09-07 23:57:09', + 'updated': '2019-09-08 00:44:08', + 'type': 'link', + 'title': 'Trump says he was set to hold secret talks with Taliban at Camp David in the US', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'BREAKING: President Trump says he was set to hold secret talks with the Taliban at Camp David in the US this weekend, but he has called off the talks after a US service member was killed in a suicide attack in Kabul. 
https://on.msnbc.com/2LB1dvs', + 'expandedLinks': [{ + 'original': 'https://on.msnbc.com/2LB1dvs', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_ms_fb_ma&fbclid=IwAR1hvIf0wom7aKl4oj50ODjDPVtW24tM42WDeAzbY4olTUyN3dg3nUdQ3CI' + }, { + 'original': 'https://on.msnbc.com/2LB1dvs', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_ms_fb_ma&fbclid=IwAR1hvIf0wom7aKl4oj50ODjDPVtW24tM42WDeAzbY4olTUyN3dg3nUdQ3CI' + }], + 'link': 'https://on.msnbc.com/2LB1dvs', + 'postUrl': 'https://www.facebook.com/msnbc/posts/2990274061068826', + 'subscriberCount': 2290452, + 'score': 11.05019305019305, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 136, + 'shareCount': 430, + 'commentCount': 961, + 'loveCount': 7, + 'wowCount': 117, + 'hahaCount': 765, + 'sadCount': 15, + 'angryCount': 431, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 61, + 'shareCount': 33, + 'commentCount': 67, + 'loveCount': 5, + 'wowCount': 10, + 'hahaCount': 23, + 'sadCount': 16, + 'angryCount': 44, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 8324, + 'name': 'MSNBC', + 'handle': 'msnbc', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2', + 'subscriberCount': 2290512, + 'url': 'https://www.facebook.com/273864989376427', + 'platform': 'Facebook', + 'platformId': '273864989376427', + 'verified': True + } + }, { + 'id': 70157512987, + 'platformId': '167115176655082_2994611437238761', + 'platform': 'Facebook', + 'date': '2019-09-07 19:00:10', + 'updated': '2019-09-08 00:25:19', + 'type': 'link', + 'title': "The First People to See 'Joker' Say It's Absolutely Incredible", + 'caption': 'vice.com', + 'description': '"It\'s a masterpiece."', + 'message': 'Wow.', + 'expandedLinks': [{ + 'original': 'https://www.vice.com/en_us/article/43kw9m/the-first-people-to-see-joker-say-its-absolutely-incredible?utm_source=vicefbus', + 'expanded': 'https://www.vice.com/en_us/article/43kw9m/the-first-people-to-see-joker-say-its-absolutely-incredible?utm_source=vicefbus' + }], + 'link': 'https://www.vice.com/en_us/article/43kw9m/the-first-people-to-see-joker-say-its-absolutely-incredible?utm_source=vicefbus', + 'postUrl': 'https://www.facebook.com/VICE/posts/2994611437238761', + 'subscriberCount': 8174144, + 'score': 10.957142857142857, + 'media': [{ + 'type': 'photo', + 'url': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQAnGhBeKGypWT01&w=674&h=674&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d689eaf5bd4cf000aabe8f6%2Flede%2F1567344325767-Screen-Shot-2019-09-01-at-92510-AM.png%3Fcrop%3D0.7819xw%3A0.8236xh%3B0.1156xw%2C0.0612xh%26resize%3D1200%3A%2A&cfs=1&sx=298&sy=0&sw=674&sh=674&_nc_hash=AQDAKv3kHukpFn1V',
+ 'height': 674,
+ 'width': 674,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQA8VrX1m3AEk4av&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d689eaf5bd4cf000aabe8f6%2Flede%2F1567344325767-Screen-Shot-2019-09-01-at-92510-AM.png%3Fcrop%3D0.7819xw%3A0.8236xh%3B0.1156xw%2C0.0612xh%26resize%3D1200%3A%2A&_nc_hash=AQBrUKcye7gfy7Yr'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 1104,
+ 'shareCount': 102,
+ 'commentCount': 181,
+ 'loveCount': 117,
+ 'wowCount': 23,
+ 'hahaCount': 7,
+ 'sadCount': 0,
+ 'angryCount': 0,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 57,
+ 'shareCount': 21,
+ 'commentCount': 30,
+ 'loveCount': 6,
+ 'wowCount': 7,
+ 'hahaCount': 12,
+ 'sadCount': 3,
+ 'angryCount': 4,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 6646,
+ 'name': 'VICE',
+ 'handle': 'VICE',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13427861_1304295039603751_2178102892370936049_n.jpg?_nc_cat=1&_nc_oc=AQmzoEUjC5BCCMVSsIFvWa52KGr3Iqh9f0Y_eezqYMFw7h_EUam7WQdYxEFvJB6LoP0&_nc_ht=scontent.xx&oh=847f8eb6c5132c90382bc0940afbc692&oe=5E02C5BA',
+ 'subscriberCount': 8177544,
+ 'url': 'https://www.facebook.com/167115176655082',
+ 'platform': 'Facebook',
+ 'platformId': '167115176655082',
+ 'verified': True
+ }
+ }, {
+ 'id': 70157378615,
+ 'platformId': '189885532970_10157980386047971',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 19:00:00',
+ 'updated': '2019-09-08 00:18:45',
+ 'type': 'link',
+ 'title': "'Eat Less Meat': 2020 Democrats Mull Changing Laws on Meat Consumption to Fight Climate Change",
+ 'caption': 'ijr.com',
+ 'description': 'Several of the 2020 Democratic presidential contenders took to the stage of Thursday night to lay out their plan to address climate change, and in doing so, some of them mulled ideas of changing laws surrounding meat consumption.',
+ 'message': 'In case you missed it:',
+ 'expandedLinks': [{
+ 'original': 'https://ijr.com/2020-democrats-mull-changing-laws-meat-consumption-climate-change/',
+ 'expanded': 'https://ijr.com/2020-democrats-mull-changing-laws-meat-consumption-climate-change/'
+ }],
+ 'link': 'https://ijr.com/2020-democrats-mull-changing-laws-meat-consumption-climate-change/',
+ 'postUrl': 'https://www.facebook.com/IJRRed/posts/10157980386047971',
+ 'subscriberCount': 8532193,
+ 'score': 10.8932527693857,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQACOVQMpvwNJjWm&w=450&h=450&url=https%3A%2F%2F242358-745360-raikfcquaxqncofqfm.stackpathdns.com%2Fwp-content%2Fuploads%2F2019%2F09%2Fphotomix-image-2019-09-05T112755.284.jpg&cfs=1&sx=183&sy=0&sw=450&sh=450&_nc_hash=AQCY2wd2lhyIQTks',
+ 'height': 450,
+ 'width': 450,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAoypPZmdrTpzAQ&url=https%3A%2F%2F242358-745360-raikfcquaxqncofqfm.stackpathdns.com%2Fwp-content%2Fuploads%2F2019%2F09%2Fphotomix-image-2019-09-05T112755.284.jpg&_nc_hash=AQCh4NFb432qR6pj'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 421,
+ 'shareCount': 3158,
+ 'commentCount': 2513,
+ 'loveCount': 8,
+ 'wowCount': 258,
+ 'hahaCount': 2667,
+ 'sadCount': 84,
+ 'angryCount': 1708,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 400,
+ 'shareCount': 120,
+ 'commentCount': 258,
+ 'loveCount': 15,
+ 'wowCount': 21,
+ 'hahaCount': 110,
+ 'sadCount': 11,
+ 'angryCount': 58,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 30245,
+ 'name': 'IJR Red',
+ 'handle': 'IJRRed',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/23376285_10156265164197971_2450414612163288246_n.jpg?_nc_cat=1&_nc_oc=AQm4KDy-Qmj38dJbaAQ0KXPVdY94zu7JBQAIUkAO2_W0uRWIl-5aI18nffFvxZoVICg&_nc_ht=scontent.xx&oh=ab7b4676afa9874079a36c20150411f5&oe=5E0C3B40',
+ 'subscriberCount': 8531658,
+ 'url': 'https://www.facebook.com/189885532970',
+ 'platform': 'Facebook',
+ 'platformId': '189885532970',
+ 'verified': True
+ }
+ }, {
+ 'id': 70164096778,
+ 'platformId': '709959352435346_2345707418860523',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 21:11:27',
+ 'updated': '2019-09-08 00:41:40',
+ 'type': 'photo',
+ 'message': '“Congratulations to the University of Nebraska on being the first school in the nation with home stadiums in both the Big Ten and the Pac-12." - Ben Sasse',
+ 'expandedLinks': [{
+ 'original': 'https://www.facebook.com/SenatorSasse/photos/a.730973980333883/2345706642193934/?type=3',
+ 'expanded': 'https://www.facebook.com/SenatorSasse/photos/a.730973980333883/2345706642193934/?type=3'
+ }],
+ 'link': 'https://www.facebook.com/SenatorSasse/photos/a.730973980333883/2345706642193934/?type=3',
+ 'postUrl': 'https://www.facebook.com/SenatorSasse/posts/2345707418860523',
+ 'subscriberCount': 47255,
+ 'score': 10.714285714285714,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69639380_2345706645527267_3280132400163586048_o.jpg?_nc_cat=101&_nc_oc=AQnRAbvB5XIVIOEbeUHM6drPHgfaP4ShhF7VuYpxbXGE3wfw1GONw_NvPgJGwAFJeoE&_nc_ht=scontent.xx&oh=026b7c097cfa8b96018a93274a1c289f&oe=5E150C5D',
+ 'height': 432,
+ 'width': 720,
+ 'full': 'https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69639380_2345706645527267_3280132400163586048_o.jpg?_nc_cat=101&_nc_oc=AQnRAbvB5XIVIOEbeUHM6drPHgfaP4ShhF7VuYpxbXGE3wfw1GONw_NvPgJGwAFJeoE&_nc_ht=scontent.xx&oh=026b7c097cfa8b96018a93274a1c289f&oe=5E150C5D'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 766,
+ 'shareCount': 385,
+ 'commentCount': 67,
+ 'loveCount': 141,
+ 'wowCount': 6,
+ 'hahaCount': 358,
+ 'sadCount': 1,
+ 'angryCount': 1,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 90,
+ 'shareCount': 8,
+ 'commentCount': 13,
+ 'loveCount': 32,
+ 'wowCount': 3,
+ 'hahaCount': 3,
+ 'sadCount': 7,
+ 'angryCount': 5,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 124328,
+ 'name': 'Senator Ben Sasse',
+ 'handle': 'SenatorSasse',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/32511931_1615949251836347_83407049412575232_n.jpg?_nc_cat=1&_nc_oc=AQmcObZPPXWZnOrR03LfCfbo0_O7SZ4plnkjx56iJ4PDvv4E3SP7GVUSS9glUE4IAbo&_nc_ht=scontent.xx&oh=572cd1c7d7bca987cd593514df1d6afb&oe=5DCA9FE3',
+ 'subscriberCount': 47281,
+ 'url': 'https://www.facebook.com/709959352435346',
+ 'platform': 'Facebook',
+ 'platformId': '709959352435346',
+ 'verified': True
+ }
+ }, {
+ 'id': 70160675221,
+ 'platformId': '13312631635_10157392187846636',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 20:01:21',
+ 'updated': '2019-09-08 00:42:43',
+ 'type': 'link',
+ 'title': 'Intelligent people tend to be messy, stay awake longer, and swear more',
+ 'caption': 'independent.co.uk',
+ 'description': "If you think about it, those who don't use any swear words are the ones who limit their vocabulary",
+ 'message': 'Just in case you forgot',
+ 'expandedLinks': [{
+ 'original': 'http://www.independent.co.uk/news/science/intelligent-people-tend-to-be-messy-stay-awake-longer-and-swear-more-a7174256.html?utm_medium=Social&utm_source=Facebook#Echobox=1567847014',
+ 'expanded': 'https://www.independent.co.uk/news/science/intelligent-people-tend-to-be-messy-stay-awake-longer-and-swear-more-a7174256.html?utm_medium=Social&utm_source=Facebook'
+ }],
+ 'link': 'http://www.independent.co.uk/news/science/intelligent-people-tend-to-be-messy-stay-awake-longer-and-swear-more-a7174256.html?utm_medium=Social&utm_source=Facebook#Echobox=1567847014',
+ 'postUrl': 'https://www.facebook.com/TheIndependentOnline/posts/10157392187846636',
+ 'subscriberCount': 8832865,
+ 'score': 10.694656488549619,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDTI-q2CoWnVI6v&w=720&h=720&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2015%2F10%2F13%2F14%2FJennifer-Lawrence.jpg&cfs=1&sx=246&sy=0&sw=1000&sh=1000&_nc_hash=AQBwG9rCHx8nZd9s',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQA5nGOIbrLuuzOd&url=https%3A%2F%2Fstatic.independent.co.uk%2Fs3fs-public%2Fthumbnails%2Fimage%2F2015%2F10%2F13%2F14%2FJennifer-Lawrence.jpg&_nc_hash=AQDlXAqho5KxD282'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 1255,
+ 'shareCount': 722,
+ 'commentCount': 486,
+ 'loveCount': 122,
+ 'wowCount': 9,
+ 'hahaCount': 205,
+ 'sadCount': 2,
+ 'angryCount': 1,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 90,
+ 'shareCount': 44,
+ 'commentCount': 58,
+ 'loveCount': 9,
+ 'wowCount': 10,
+ 'hahaCount': 27,
+ 'sadCount': 8,
+ 'angryCount': 16,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 19065,
+ 'name': 'The Independent',
+ 'handle': 'TheIndependentOnline',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11051795_10152732082756636_6705742038347351188_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQmApCC_log9_TfPU5-TLVRKHyBo2YH6UPG2d6R-43r5u7HhElr7QPKk9J_AXR9q1Ac&_nc_ht=scontent.xx&oh=47ac79067cb2e33520f6920eb409611d&oe=5E0FED75',
+ 'subscriberCount': 8834731,
+ 'url': 'https://www.facebook.com/13312631635',
+ 'platform': 'Facebook',
+ 'platformId': '13312631635',
+ 'verified': True
+ }
+ }, {
+ 'id': 70167308119,
+ 'platformId': '134486075205_10163968846050206',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 22:06:05',
+ 'updated': '2019-09-08 00:43:59',
+ 'type': 'link',
+ 'title': 'Fisherman performs C-section on dead pregnant shark, pulls out 98 live pups',
+ 'caption': 'nypost.com',
+ 'description': 'This is the incredible moment a fisherman performs a C-section on a dead shark – and releases 98 live babies back into the wild. Mathew Orlov carried out the impromptu operation when he realized th…',
+ 'message': '“When I saw the belly moving, instinct kicked in."',
+ 'expandedLinks': [{
+ 'original': 'https://trib.al/LgggOVZ',
+ 'expanded': 'https://nypost.com/2018/04/13/fisherman-pulls-98-live-babies-out-of-dead-shark/?utm_medium=SocialFlow&sr_share=facebook&utm_source=NYPFacebook&utm_campaign=SocialFlow&fbclid=IwAR3o967WReYiKfY2dLOU9zIaEcsGgLAy5p9d0pU-GiXMLnzc8ZZ7Pe5CSxQ'
+ }],
+ 'link': 'https://trib.al/LgggOVZ',
+ 'postUrl': 'https://www.facebook.com/NYPost/posts/10163968846050206',
+ 'subscriberCount': 4182920,
+ 'score': 10.654450261780104,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBUGloTZw1nxKQg&w=720&h=720&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2018%2F04%2F180413-shark-c-section-dead-shark-index.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&cfs=1&_nc_hash=AQBnfnyTsHsawpB1',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBwye2XJlVhygAX&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2018%2F04%2F180413-shark-c-section-dead-shark-index.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&_nc_hash=AQDS28jMlElJ1X8_'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 704,
+ 'shareCount': 567,
+ 'commentCount': 78,
+ 'loveCount': 166,
+ 'wowCount': 482,
+ 'hahaCount': 6,
+ 'sadCount': 28,
+ 'angryCount': 4,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 48,
+ 'shareCount': 42,
+ 'commentCount': 47,
+ 'loveCount': 5,
+ 'wowCount': 12,
+ 'hahaCount': 20,
+ 'sadCount': 8,
+ 'angryCount': 9,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 10342,
+ 'name': 'New York Post',
+ 'handle': 'NYPost',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12932928_10157483552025206_1176575955706691041_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnPmbZuC7S1v1NTPRZ7rWQU4EucwAW3nKx-aXD0PzlPsD3ifQpdaLcXEegH730Wy_o&_nc_ht=scontent.xx&oh=c77d86309611fa2972df1979bf6cab9e&oe=5E0827CA',
+ 'subscriberCount': 4183079,
+ 'url': 'https://www.facebook.com/134486075205',
+ 'platform': 'Facebook',
+ 'platformId': '134486075205',
+ 'verified': True
+ }
+ }, {
+ 'id': 70159375367,
+ 'platformId': '5281959998_10152009926804999',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 19:40:00',
+ 'updated': '2019-09-08 00:47:27',
+ 'type': 'link',
+ 'title': 'A Virginia University Offers Free Semester to Students in Bahamas Displaced by Hurricane',
+ 'caption': 'nytimes.com',
+ 'description': 'In the wake of Hurricane Dorian, Hampton University will open its doors to students from the University of the Bahamas.',
+ 'message': 'Hampton University, a historically black institution in Virginia, is offering a semester of free room, board and tuition to University of the Bahamas students affected by Hurricane Dorian.',
+ 'expandedLinks': [{
+ 'original': 'https://www.nytimes.com/2019/09/07/us/hampton-university-bahamas.html?smid=fb-nytimes&smtyp=cur',
+ 'expanded': 'https://www.nytimes.com/2019/09/07/us/hampton-university-bahamas.html?smid=fb-nytimes&smtyp=cur'
+ }],
+ 'link': 'https://www.nytimes.com/2019/09/07/us/hampton-university-bahamas.html?smid=fb-nytimes&smtyp=cur',
+ 'postUrl': 'https://www.facebook.com/nytimes/posts/10152009926804999',
+ 'subscriberCount': 16854203,
+ 'score': 10.245652173913044,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQC3W5MeqRKPesIZ&w=720&h=720&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F08%2Fmultimedia%2F08xp-hampton%2F05xp-hampton-facebookJumbo.jpg&cfs=1&_nc_hash=AQCcTlBbi1yooU7X',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBVcBFba0vlBYuZ&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F08%2Fmultimedia%2F08xp-hampton%2F05xp-hampton-facebookJumbo.jpg&_nc_hash=AQBVEUeUjhFuwrpM'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 3250,
+ 'shareCount': 417,
+ 'commentCount': 81,
+ 'loveCount': 897,
+ 'wowCount': 64,
+ 'hahaCount': 3,
+ 'sadCount': 0,
+ 'angryCount': 1,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 198,
+ 'shareCount': 70,
+ 'commentCount': 89,
+ 'loveCount': 14,
+ 'wowCount': 21,
+ 'hahaCount': 27,
+ 'sadCount': 19,
+ 'angryCount': 22,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 7132,
+ 'name': 'The New York Times',
+ 'handle': 'nytimes',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57',
+ 'subscriberCount': 16854715,
+ 'url': 'https://www.facebook.com/5281959998',
+ 'platform': 'Facebook',
+ 'platformId': '5281959998',
+ 'verified': True
+ }
+ }, {
+ 'id': 70167645982,
+ 'platformId': '131459315949_10157157628655950',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 22:19:43',
+ 'updated': '2019-09-08 00:28:55',
+ 'type': 'video',
+ 'caption': 'cbsnews.com',
+ 'description': ' ',
+ 'message': 'The vote signifies an escalation of the inquiry into whether President Trump should be impeached.',
+ 'expandedLinks': [{
+ 'original': 'https://cbsn.ws/34ucx5h',
+ 'expanded': 'https://www.cbsnews.com/news/house-judiciary-committee-to-vote-on-defining-impeachment-inquiry-2019-09-07/?ftag=CNM-00-10aab6a&linkId=73322081'
+ }],
+ 'link': 'https://cbsn.ws/34ucx5h',
+ 'postUrl': 'https://www.facebook.com/CBSNews/posts/10157157628655950',
+ 'subscriberCount': 5892543,
+ 'score': 10.166666666666666,
+ 'media': [{
+ 'type': 'video',
+ 'url': 'https://public.vilynx.com/direct/8fc31712de713e0c34c55c4bce033614/65d47c3b-5c2f-413e-b8a2-11c147162256/pro69.viwindow.mp4',
+ 'height': 0,
+ 'width': 0
+ }, {
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBW8RXTe_M1XwuG&w=630&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F07%2F24%2F16269e5d-742b-4d25-bf3e-ab7e89bff231%2Fthumbnail%2F1200x630%2Fc0fbe34dbb7b09ee3a51a3732fb0568e%2Fcbsn-fusion-cbs-news-special-report-opening-statement-jerry-nadler-thumbnail-1897399-640x360.jpg&cfs=1&sx=288&sy=0&sw=630&sh=630&_nc_hash=AQC7qXK3Rnuf8cKx',
+ 'height': 630,
+ 'width': 630,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCfVhSfr4ER8ue6&w=1200&h=630&url=https%3A%2F%2Fcbsnews2.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F07%2F24%2F16269e5d-742b-4d25-bf3e-ab7e89bff231%2Fthumbnail%2F1200x630%2Fc0fbe34dbb7b09ee3a51a3732fb0568e%2Fcbsn-fusion-cbs-news-special-report-opening-statement-jerry-nadler-thumbnail-1897399-640x360.jpg&crop&sx=0&sy=0&sw=1200&sh=630&_nc_hash=AQC5DoCNSb-UglIh'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 828,
+ 'shareCount': 161,
+ 'commentCount': 395,
+ 'loveCount': 181,
+ 'wowCount': 13,
+ 'hahaCount': 243,
+ 'sadCount': 5,
+ 'angryCount': 65,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 63,
+ 'shareCount': 27,
+ 'commentCount': 43,
+ 'loveCount': 8,
+ 'wowCount': 9,
+ 'hahaCount': 14,
+ 'sadCount': 14,
+ 'angryCount': 8,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 14655,
+ 'name': 'CBS News',
+ 'handle': 'CBSNews',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB',
+ 'subscriberCount': 5892766,
+ 'url': 'https://www.facebook.com/131459315949',
+ 'platform': 'Facebook',
+ 'platformId': '131459315949',
+ 'verified': True
+ }
+ }, {
+ 'id': 70169322403,
+ 'platformId': '131459315949_10157157685470950',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 22:39:50',
+ 'updated': '2019-09-08 00:28:55',
+ 'type': 'video',
+ 'caption': 'cbsnews.com',
+ 'description': ' ',
+ 'message': "Brody has autism and is nonverbal, but when he laid his head in Snow White's lap, she knew just what to do.",
+ 'expandedLinks': [{
+ 'original': 'https://cbsn.ws/2ZVMfFt',
+ 'expanded': 'https://www.cbsnews.com/news/snow-white-comforts-boy-with-autism-who-had-a-meltdown-in-disney-world/?ftag=CNM-00-10aab6a&linkId=73322490'
+ }],
+ 'link': 'https://cbsn.ws/2ZVMfFt',
+ 'postUrl': 'https://www.facebook.com/CBSNews/posts/10157157685470950',
+ 'subscriberCount': 5892543,
+ 'score': 10.161111111111111,
+ 'media': [{
+ 'type': 'video',
+ 'url': 'https://public.vilynx.com/direct/8fc31712de713e0c34c55c4bce033614/e9d4b8d2-3cdd-4db6-a57a-6cece6c6105f/pro69.viwindow.mp4',
+ 'height': 0,
+ 'width': 0
+ }, {
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBLzNfU6X0rpLci&w=630&h=630&url=https%3A%2F%2Fcbsnews1.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F09%2F04%2F81484701-f4ca-41be-9857-d7791a492ff4%2Fthumbnail%2F1200x630%2F47c61872eff69cc27da4106e169e7363%2Funtitled-collage-3.jpg&cfs=1&sx=43&sy=0&sw=630&sh=630&_nc_hash=AQDXFwpknxKfOMPn',
+ 'height': 630,
+ 'width': 630,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDGGpJ32waOZB6k&w=1200&h=630&url=https%3A%2F%2Fcbsnews1.cbsistatic.com%2Fhub%2Fi%2Fr%2F2019%2F09%2F04%2F81484701-f4ca-41be-9857-d7791a492ff4%2Fthumbnail%2F1200x630%2F47c61872eff69cc27da4106e169e7363%2Funtitled-collage-3.jpg&crop&sx=0&sy=0&sw=1200&sh=630&_nc_hash=AQAvhhzs725BcBMw'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 763,
+ 'shareCount': 413,
+ 'commentCount': 37,
+ 'loveCount': 582,
+ 'wowCount': 5,
+ 'hahaCount': 1,
+ 'sadCount': 28,
+ 'angryCount': 0,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 60,
+ 'shareCount': 26,
+ 'commentCount': 42,
+ 'loveCount': 8,
+ 'wowCount': 9,
+ 'hahaCount': 13,
+ 'sadCount': 14,
+ 'angryCount': 8,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 14655,
+ 'name': 'CBS News',
+ 'handle': 'CBSNews',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/c7.0.200.200a/p200x200/11052868_10153128917450950_7657871426571821819_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlXjGTrfksAnoG50hBe4WDnf00w6XeLzrCR-xvjCQkB_VlwwTuquCV4zQB0tMkmVTU&_nc_ht=scontent.xx&oh=66fa68d473b2015c3875d62e625a12d1&oe=5E0EF6CB',
+ 'subscriberCount': 5892766,
+ 'url': 'https://www.facebook.com/131459315949',
+ 'platform': 'Facebook',
+ 'platformId': '131459315949',
+ 'verified': True
+ }
+ }, {
+ 'id': 70172671434,
+ 'platformId': '268914272540_10156464486172541',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 23:40:03',
+ 'updated': '2019-09-08 00:30:46',
+ 'type': 'link',
+ 'title': 'EMT dad responds to fatal accident of country star Kylie Rae Harris, finds out daughter was the victim',
+ 'caption': 'nydailynews.com',
+ 'description': 'The EMT father of Maria Elena Cruz, the teen who fatally collided with country singer Kylie Rae Harris on Wednesday responded to the scene unaware that one of the victims was his daughter.',
+ 'message': 'Maria Elena Cruz, 16, who died in a collision with country singer Kylie Rae Harris was treated at the scene by her EMT father. Her father, Pedro Cruz, responded to the deadly accident completely unaware that his daughter was one of his victims.',
+ 'expandedLinks': [{
+ 'original': 'https://trib.al/jNmLead',
+ 'expanded': 'https://www.nydailynews.com/news/crime/ny-kylie-rae-harris-and-victims-dad-20190907-exgtk5a4q5c33fm47gdbaafi6a-story.html?fbclid=IwAR350lwuh3az3OjnIMzY99g3KW5gT8eFuRNMtPacg2AGSn3VFJ5kNSACvqo&fbclid=IwAR0zXLqmmFONyYeaFgWirxx_FECHO_6iv3_bRcILozfgj8pnDFEtboIT1Ag'
+ }],
+ 'link': 'https://trib.al/jNmLead',
+ 'postUrl': 'https://www.facebook.com/NYDailyNews/posts/10156464486172541',
+ 'subscriberCount': 3119682,
+ 'score': 10.021739130434783,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAvv2wdaFi7vPsi&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F-iBBbg5wyisMtnRhms2BcExkoWU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FW3CGH3O27NGTLGX3E3XB7XF3WE.jpg&cfs=1&_nc_hash=AQCMHXpSpTA-E4Is',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDh0aeIZrN3K3GQ&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F-iBBbg5wyisMtnRhms2BcExkoWU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FW3CGH3O27NGTLGX3E3XB7XF3WE.jpg&_nc_hash=AQDrpc39QwLCuMCx'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 79,
+ 'shareCount': 188,
+ 'commentCount': 68,
+ 'loveCount': 4,
+ 'wowCount': 50,
+ 'hahaCount': 0,
+ 'sadCount': 531,
+ 'angryCount': 2,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 22,
+ 'shareCount': 19,
+ 'commentCount': 16,
+ 'loveCount': 4,
+ 'wowCount': 7,
+ 'hahaCount': 10,
+ 'sadCount': 6,
+ 'angryCount': 8,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 18752,
+ 'name': 'New York Daily News',
+ 'handle': 'NYDailyNews',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1',
+ 'subscriberCount': 3120017,
+ 'url': 'https://www.facebook.com/268914272540',
+ 'platform': 'Facebook',
+ 'platformId': '268914272540',
+ 'verified': True
+ }
+ }, {
+ 'id': 70158925469,
+ 'platformId': '37763684202_10157814715809203',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 19:16:53',
+ 'updated': '2019-09-08 00:41:24',
+ 'type': 'link',
+ 'title': 'MIT Media Lab Director Joi Ito Resigns Amid New Jeffrey Epstein Revelations',
+ 'caption': 'thedailybeast.com',
+ 'description': 'The move comes a day after the New Yorker revealed that Media Lab’s financial relationship to Jeffrey Epstein was more deeply entangled than previously known.',
+ 'message': 'BREAKING: MIT Media Lab Director Joi Ito resigns amid new Jeffrey Epstein revelations',
+ 'expandedLinks': [{
+ 'original': 'https://trib.al/WI1hIej',
+ 'expanded': 'https://www.thedailybeast.com/mit-media-lab-director-joi-ito-resigns-amid-new-jeffrey-epstein-revelations?via=FB_Page&source=TDB'
+ }],
+ 'link': 'https://trib.al/WI1hIej',
+ 'postUrl': 'https://www.facebook.com/thedailybeast/posts/10157814715809203',
+ 'subscriberCount': 2163205,
+ 'score': 9.9438202247191,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQByuRoL0gP8gqu8&w=720&h=720&url=https%3A%2F%2Fimg.thedailybeast.com%2Fimage%2Fupload%2Fc_crop%2Cd_placeholder_euli9k%2Ch_2777%2Cw_4938%2Cx_0%2Cy_0%2Fdpr_2.0%2Fc_limit%2Cw_740%2Ffl_lossy%2Cq_auto%2Fv1567883633%2FGettyImages-1052075272_zwmkfa&cfs=1&_nc_hash=AQBXZ5IEfkt2M0LV',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQA_kBIaNfGxPLto&url=https%3A%2F%2Fimg.thedailybeast.com%2Fimage%2Fupload%2Fc_crop%2Cd_placeholder_euli9k%2Ch_2777%2Cw_4938%2Cx_0%2Cy_0%2Fdpr_2.0%2Fc_limit%2Cw_740%2Ffl_lossy%2Cq_auto%2Fv1567883633%2FGettyImages-1052075272_zwmkfa&_nc_hash=AQDcf_ay2hmMa-1u'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 303,
+ 'shareCount': 284,
+ 'commentCount': 38,
+ 'loveCount': 8,
+ 'wowCount': 192,
+ 'hahaCount': 48,
+ 'sadCount': 1,
+ 'angryCount': 11,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 19,
+ 'shareCount': 11,
+ 'commentCount': 14,
+ 'loveCount': 6,
+ 'wowCount': 6,
+ 'hahaCount': 14,
+ 'sadCount': 7,
+ 'angryCount': 12,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 7781,
+ 'name': 'The Daily Beast',
+ 'handle': 'thedailybeast',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18447180_10155420999849203_1942956350622474660_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQlsvWaYHxyRC2B3NwwmVoV1kpqGNvYkkSxSr_lFopmdwhj-uerxTWu7CmbWz-8Qq-Q&_nc_ht=scontent.xx&oh=86caf840e49b739e6381c591317aab4b&oe=5DC85150',
+ 'subscriberCount': 2163118,
+ 'url': 'https://www.facebook.com/37763684202',
+ 'platform': 'Facebook',
+ 'platformId': '37763684202',
+ 'verified': True
+ }
+ }, {
+ 'id': 70165436882,
+ 'platformId': '182919686769_10156515005471770',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 21:30:07',
+ 'updated': '2019-09-08 00:40:44',
+ 'type': 'link',
+ 'title': 'Bernie Sanders Says Abortion Will Help Fight Climate Change',
+ 'caption': 'dailycaller.com',
+ 'description': 'Bernie Sanders Says Abortion Will Help Fight Climate Change',
+ 'message': 'Say what?!',
+ 'expandedLinks': [{
+ 'original': 'https://dailycaller.com/2019/09/04/bernie-sanders-abortion-climate-change/',
+ 'expanded': 'https://dailycaller.com/2019/09/04/bernie-sanders-abortion-climate-change/'
+ }],
+ 'link': 'https://dailycaller.com/2019/09/04/bernie-sanders-abortion-climate-change/',
+ 'postUrl': 'https://www.facebook.com/DailyCaller/posts/10156515005471770',
+ 'subscriberCount': 5408428,
+ 'score': 9.84862385321101,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBAeviXgVv1qTKI&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f0adaa53100da2660e23%2F5351ecff4ddf98a16d14fb0334779a1b0dd0b176_49eaacdfd17f2229c1f6ce7274e4f7a6cd27f870_facebook&cfs=1&_nc_hash=AQCW4ug6vLqDQNsT',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAyo-47Q_tEYqem&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f0adaa53100da2660e23%2F5351ecff4ddf98a16d14fb0334779a1b0dd0b176_49eaacdfd17f2229c1f6ce7274e4f7a6cd27f870_facebook&_nc_hash=AQD4lc3SzFdaf30j'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 221,
+ 'shareCount': 989,
+ 'commentCount': 1329,
+ 'loveCount': 1,
+ 'wowCount': 138,
+ 'hahaCount': 449,
+ 'sadCount': 114,
+ 'angryCount': 1053,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 133,
+ 'shareCount': 64,
+ 'commentCount': 121,
+ 'loveCount': 9,
+ 'wowCount': 16,
+ 'hahaCount': 48,
+ 'sadCount': 8,
+ 'angryCount': 37,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 13489,
+ 'name': 'The Daily Caller',
+ 'handle': 'DailyCaller',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471',
+ 'subscriberCount': 5408115,
+ 'url': 'https://www.facebook.com/182919686769',
+ 'platform': 'Facebook',
+ 'platformId': '182919686769',
+ 'verified': True
+ }
+ }, {
+ 'id': 70158720115,
+ 'platformId': '5281959998_10152009911414999',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 19:25:00',
+ 'updated': '2019-09-08 00:47:27',
+ 'type': 'link',
+ 'title': 'Two Men Kiss in a Comic Book, and a Mayor Orders a Raid',
+ 'caption': 'nytimes.com',
+ 'description': 'The raid drew a backlash from festival organizers, publishing houses, comedians and, finally, a Brazilian court, which barred the mayor from further seizure efforts.',
+ 'message': "The mayor of Rio de Janeiro ordered law enforcement agents to raid the city's International Book Fair and seize copies of a comic book that featured 2 men kissing. The festival's organizers took him to court and won.",
+ 'expandedLinks': [{
+ 'original': 'https://www.nytimes.com/2019/09/07/world/americas/rio-gay-kiss-comic.html?smid=fb-nytimes&smtyp=cur',
+ 'expanded': 'https://www.nytimes.com/2019/09/07/world/americas/rio-gay-kiss-comic.html?smid=fb-nytimes&smtyp=cur'
+ }],
+ 'link': 'https://www.nytimes.com/2019/09/07/world/americas/rio-gay-kiss-comic.html?smid=fb-nytimes&smtyp=cur',
+ 'postUrl': 'https://www.facebook.com/nytimes/posts/10152009911414999',
+ 'subscriberCount': 16854203,
+ 'score': 9.691304347826087,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBVIgoXJNg7nE2M&w=550&h=550&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fworld%2F07brazil-kiss%2F07brazil-kiss-facebookJumbo.jpg&cfs=1&sx=500&sy=0&sw=550&sh=550&_nc_hash=AQD90hjbBkmgAi-z',
+ 'height': 550,
+ 'width': 550,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCX5Hsk1zs-QnL_&url=https%3A%2F%2Fstatic01.nyt.com%2Fimages%2F2019%2F09%2F07%2Fworld%2F07brazil-kiss%2F07brazil-kiss-facebookJumbo.jpg&_nc_hash=AQAnEh_d0w0PqNut'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 1684,
+ 'shareCount': 771,
+ 'commentCount': 289,
+ 'loveCount': 83,
+ 'wowCount': 263,
+ 'hahaCount': 473,
+ 'sadCount': 107,
+ 'angryCount': 788,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 198,
+ 'shareCount': 70,
+ 'commentCount': 89,
+ 'loveCount': 14,
+ 'wowCount': 21,
+ 'hahaCount': 27,
+ 'sadCount': 19,
+ 'angryCount': 22,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 7132,
+ 'name': 'The New York Times',
+ 'handle': 'nytimes',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57',
+ 'subscriberCount': 16854715,
+ 'url': 'https://www.facebook.com/5281959998',
+ 'platform': 'Facebook',
+ 'platformId': '5281959998',
+ 'verified': True
+ }
+ }, {
+ 'id': 70168080694,
+ 'platformId': '8304333127_10159038883103128',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 22:22:37',
+ 'updated': '2019-09-08 00:44:09',
+ 'type': 'link',
+ 'title': 'Whoa Canada! Bianca Andreescu Captures the U.S. Open Title',
+ 'caption': 'wsj.com',
+ 'description': 'Bianca Andreescu, a 19-year-old rising star from Canada, defeated Serena Williams 6-3, 7-5 to win the 2019 U.S. Open women’s singles title.',
+ 'message': "In a blowout that got tense late, 19-year-old rising star Bianca Andreescu defeated her idol to win Canada's first major tennis title.",
+ 'expandedLinks': [{
+ 'original': 'https://on.wsj.com/34yi0YH',
+ 'expanded': 'https://www.wsj.com/articles/bianca-andreescu-wins-the-u-s-open-11567893664?mod=e2fb'
+ }],
+ 'link': 'https://on.wsj.com/34yi0YH',
+ 'postUrl': 'https://www.facebook.com/wsj/posts/10159038883103128',
+ 'subscriberCount': 6360114,
+ 'score': 9.640625,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQARwY3pIH7bBdxO&w=720&h=720&url=https%3A%2F%2Fimages.wsj.net%2Fim-105055%2Fsocial&cfs=1&_nc_hash=AQB51kJj_a5pmM8l',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAPkeH1BgXOxCNQ&url=https%3A%2F%2Fimages.wsj.net%2Fim-105055%2Fsocial&_nc_hash=AQAGTlQG2xuXvh_F'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 434,
+ 'shareCount': 38,
+ 'commentCount': 52,
+ 'loveCount': 61,
+ 'wowCount': 24,
+ 'hahaCount': 3,
+ 'sadCount': 4,
+ 'angryCount': 1,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 26,
+ 'shareCount': 8,
+ 'commentCount': 9,
+ 'loveCount': 3,
+ 'wowCount': 3,
+ 'hahaCount': 8,
+ 'sadCount': 3,
+ 'angryCount': 4,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 10335,
+ 'name': 'The Wall Street Journal',
+ 'handle': 'wsj',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26734229_10157192613173128_6286097899182572387_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkg3dR3V2rO72fdcQNc6Kdupv3fYH3-VXio9SvAwKULEi36QT0vhIKN0_FvohpQCGs&_nc_ht=scontent.xx&oh=f550584e1e7adab86d889e32b7468801&oe=5DFE7FE9',
+ 'subscriberCount': 6360356,
+ 'url': 'https://www.facebook.com/8304333127',
+ 'platform': 'Facebook',
+ 'platformId': '8304333127',
+ 'verified': True
+ }
+ }, {
+ 'id': 70158010147,
+ 'platformId': '56845382910_10157286969462911',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 19:05:44',
+ 'updated': '2019-09-08 00:45:25',
+ 'type': 'link',
+ 'title': "Laura Ingraham Tries To Drink Light Bulb-Stuffed Steak To 'Trigger' Liberals",
+ 'caption': 'huffpost.com',
+ 'description': "Spoiler alert: She learned she can't.",
+ 'message': "The Fox News host's earnest efforts to troll liberals reached a new, if not bizarre, level.",
+ 'expandedLinks': [{
+ 'original': 'http://huffp.st/ePhzz9l',
+ 'expanded': 'https://www.huffpost.com/entry/laura-ingraham-tries-to-drink-light-bulb-steak-to-trigger-liberals_n_5d73c962e4b0fde50c2740cd?utm_campaign=hp_fb_pages&utm_source=politics_fb&ncid=fcbklnkushpmg00000013&utm_medium=facebook&section=politics&fbclid=IwAR3r_8e8E48MWt0sKmt1Qh3SNXit6km5u1xbqTYhSE1dt3cvbQwO44vYIvc'
+ }],
+ 'link': 'http://huffp.st/ePhzz9l',
+ 'postUrl': 'https://www.facebook.com/HuffPostPolitics/posts/10157286969462911',
+ 'subscriberCount': 2107913,
+ 'score': 9.410468319559229,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDGta8H68A0fkUr&w=720&h=720&url=https%3A%2F%2Fimg.huffingtonpost.com%2Fasset%2F5d73dc653b00002a74d0c177.jpeg%3Fops%3D1778_1000&cfs=1&sx=345&sy=0&sw=1000&sh=1000&_nc_hash=AQC0jKBN9GGCpmxA',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBadF6Ee2wYjtdm&url=https%3A%2F%2Fimg.huffingtonpost.com%2Fasset%2F5d73dc653b00002a74d0c177.jpeg%3Fops%3D1778_1000&_nc_hash=AQApk4tBx_BXnnZz'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 139,
+ 'shareCount': 785,
+ 'commentCount': 1213,
+ 'loveCount': 8,
+ 'wowCount': 153,
+ 'hahaCount': 995,
+ 'sadCount': 54,
+ 'angryCount': 69,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 108,
+ 'shareCount': 74,
+ 'commentCount': 86,
+ 'loveCount': 14,
+ 'wowCount': 11,
+ 'hahaCount': 34,
+ 'sadCount': 9,
+ 'angryCount': 27,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 13844,
+ 'name': 'HuffPost Politics',
+ 'handle': 'HuffPostPolitics',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/18838902_10155124699752911_6971495653588629046_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm5cko-OrQpOcPI-GqgP9V74INLYLzur0WIBNnrYmgNA33fLG0VMMxSWpg2i7235p0&_nc_ht=scontent.xx&oh=755100a2afdbaf29d5e08e613e66fc6e&oe=5DF42A6A',
+ 'subscriberCount': 2107783,
+ 'url': 'https://www.facebook.com/56845382910',
+ 'platform': 'Facebook',
+ 'platformId': '56845382910',
+ 'verified': True
+ }
+ }, {
+ 'id': 70158000246,
+ 'platformId': '15418366158_10157860918341159',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 19:00:32',
+ 'updated': '2019-09-08 00:44:19',
+ 'type': 'link',
+ 'title': 'Illinois Paid Millions in Medicaid for People Who Are Already Dead',
+ 'caption': 'pjmedia.com',
+ 'description': 'The proof is in the pudding, as they say. In other words, if you want to see the "competency" and "efficiency" of leftists at work, look to Illinois. Among a myriad of state-wide problems created by leftist...',
+ 'message': 'Illinois Paid Millions in Medicaid for People Who Are Already Dead',
+ 'expandedLinks': [{
+ 'original': 'https://pjmedia.com/trending/illinois-paid-millions-in-medicaid-for-people-who-are-already-dead/',
+ 'expanded': 'https://pjmedia.com/trending/illinois-paid-millions-in-medicaid-for-people-who-are-already-dead/'
+ }],
+ 'link': 'https://pjmedia.com/trending/illinois-paid-millions-in-medicaid-for-people-who-are-already-dead/',
+ 'postUrl': 'https://www.facebook.com/PJMedia/posts/10157860918341159',
+ 'subscriberCount': 345146,
+ 'score': 9.321951219512195,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDFW_Wc6ALaK2iY&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73fbbbf7581602fd2d8373%2Fe0030b822866ea4dc215e239a15d2d0102103635_bb06c1fec8f7ae6f502565ee1d84966ccc5c6a5d_facebook&cfs=1&_nc_hash=AQC7X9-liB_eXQPl',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQA2yg1_Reu1pkV_&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73fbbbf7581602fd2d8373%2Fe0030b822866ea4dc215e239a15d2d0102103635_bb06c1fec8f7ae6f502565ee1d84966ccc5c6a5d_facebook&_nc_hash=AQD4tH4FmRBoAZse'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 136,
+ 'shareCount': 1006,
+ 'commentCount': 152,
+ 'loveCount': 1,
+ 'wowCount': 140,
+ 'hahaCount': 28,
+ 'sadCount': 30,
+ 'angryCount': 418,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 40,
+ 'shareCount': 48,
+ 'commentCount': 30,
+ 'loveCount': 4,
+ 'wowCount': 8,
+ 'hahaCount': 27,
+ 'sadCount': 5,
+ 'angryCount': 43,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 546413,
+ 'name': 'PJ Media',
+ 'handle': 'PJMedia',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/11233498_10153918103746159_4425260475851381266_n.jpg?_nc_cat=1&_nc_oc=AQlsQcaTBN0IYmuAz9KhN7jR3MPlfGRQ6pQx6vtSV9AWa6eNztotI3-NTLX1xGzJ6zE&_nc_ht=scontent.xx&oh=15f625aebc03c1c0e428efec7e19fab3&oe=5E04568A',
+ 'subscriberCount': 345163,
+ 'url': 'https://www.facebook.com/15418366158',
+ 'platform': 'Facebook',
+ 'platformId': '15418366158',
+ 'verified': True
+ }
+ }, {
+ 'id': 70162497981,
+ 'platformId': '34407447433_10156144196982434',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 20:34:39',
+ 'updated': '2019-09-08 00:29:54',
+ 'type': 'link',
+ 'title': 'Roman Polanski, Convicted Child Rapist, Has Won Yet Another Prize',
+ 'caption': 'jezebel.com',
+ 'description': 'Roman Polanski, who pled guilty to unlawful sexual intercourse with a minor in 1977 and has yet to serve a single day of his sentence, has been awarded the Grand Jury Prize at the Venice International Film Festival for a film about a wrongfully accused man.\r\n',
+ 'message': 'Polanski has been persecuted with more than 20 awards since pleading guilty to child rape in 1978.',
+ 'expandedLinks': [{
+ 'original': 'https://trib.al/HKSWKHH',
+ 'expanded': 'https://jezebel.com/roman-polanski-convicted-child-rapist-has-won-yet-ano-1837956995?rev=1567888256061&utm_medium=socialflow&utm_source=jezebel_facebook&utm_campaign=socialflow_jezebel_facebook'
+ }],
+ 'link': 'https://trib.al/HKSWKHH',
+ 'postUrl': 'https://www.facebook.com/Jezebel/posts/10156144196982434',
+ 'subscriberCount': 815764,
+ 'score': 8.741379310344827,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCw_aA4LmkJCTiz&w=720&h=720&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--t-Q8raQV--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fb7pworkdtxwfa4o2unar.jpg&cfs=1&_nc_hash=AQBzyRZiu5YH78R6',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAK3dExwA-1Y9kH&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--t-Q8raQV--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fb7pworkdtxwfa4o2unar.jpg&_nc_hash=AQAqMA4iHPKH2uV3'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 35,
+ 'shareCount': 87,
+ 'commentCount': 28,
+ 'loveCount': 1,
+ 'wowCount': 12,
+ 'hahaCount': 2,
+ 'sadCount': 9,
+ 'angryCount': 333,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 14,
+ 'shareCount': 6,
+ 'commentCount': 7,
+ 'loveCount': 7,
+ 'wowCount': 4,
+ 'hahaCount': 5,
+ 'sadCount': 3,
+ 'angryCount': 12,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 6753,
+ 'name': 'Jezebel',
+ 'handle': 'Jezebel',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10632833_10152652146387434_9205889665163163075_n.png?_nc_cat=1&_nc_oc=AQmE3moAD_e4DDH0Qk-IkkzGJ36IXDD-O29mmUauemxpi5JLbY-oMjPMeCglmwSb0Rs&_nc_ht=scontent.xx&oh=d7afae2d39ef36c76291f53c416d6c76&oe=5E0F900A',
+ 'subscriberCount': 815764,
+ 'url': 'https://www.facebook.com/34407447433',
+ 'platform': 'Facebook',
+ 'platformId': '34407447433',
+ 'verified': True
+ }
+ }, {
+ 'id': 70166555948,
+ 'platformId': '513813158657249_2597694686935742',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 22:00:19',
+ 'updated': '2019-09-08 00:48:01',
+ 'type': 'link',
+ 'title': 'Westboro Baptist Church Riots At Marine’s Funeral, Gets Greeted By Wall Of Bikers',
+ 'caption': 'taphaps.com',
+ 'description': "The Westboro Baptist Church decided to protest a Marine's funeral. When word of this leaked out, a group of unexpected guests decided to show up and teach them a lesson.",
+ 'message': 'The Westboro Baptist Church decided to protest a Marine’s funeral. When word of this leaked out, a group of unexpected guests decided to show up and teach them a lesson.',
+ 'expandedLinks': [{
+ 'original': 'http://ow.ly/RSR930punma',
+ 'expanded': 'https://taphaps.com/westboro-marine-richard-bennett/'
+ }],
+ 'link': 'http://ow.ly/RSR930punma',
+ 'postUrl': 'https://www.facebook.com/MadWorldNewsCorp/posts/2597694686935742',
+ 'subscriberCount': 2133967,
+ 'score': 8.69260700389105,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBm9Rm06lWlbfsb&w=720&h=720&url=https%3A%2F%2Fi2.wp.com%2Ftaphaps.com%2Fwp-content%2Fuploads%2F2018%2F06%2Fcomposite_15278619383366.jpg%3Ffit%3D800%252C420%26ssl%3D1&cfs=1&_nc_hash=AQCM6dsvk8D4QlT7',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCICcsFt2wd20Iu&url=https%3A%2F%2Fi2.wp.com%2Ftaphaps.com%2Fwp-content%2Fuploads%2F2018%2F06%2Fcomposite_15278619383366.jpg%3Ffit%3D800%252C420%26ssl%3D1&_nc_hash=AQB2ggt2TiOntrL5'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 955,
+ 'shareCount': 426,
+ 'commentCount': 483,
+ 'loveCount': 185,
+ 'wowCount': 14,
+ 'hahaCount': 5,
+ 'sadCount': 94,
+ 'angryCount': 72,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 65,
+ 'shareCount': 46,
+ 'commentCount': 63,
+ 'loveCount': 8,
+ 'wowCount': 6,
+ 'hahaCount': 12,
+ 'sadCount': 7,
+ 'angryCount': 50,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 279876,
+ 'name': 'Mad World News',
+ 'handle': 'MadWorldNewsCorp',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16649435_1331399193565304_7598140519586777175_n.png?_nc_cat=1&_nc_oc=AQkFf7jm82V9pnSg1x0Pqt0rlA2Yl-XqrdIF4h-iVA0BzRc8fXvud27Fd5_bf3n4adY&_nc_ht=scontent.xx&oh=db1ac67cb2a4dc589f0e879b97477ebd&oe=5E151F16',
+ 'subscriberCount': 2135169,
+ 'url': 'https://www.facebook.com/513813158657249',
+ 'platform': 'Facebook',
+ 'platformId': '513813158657249',
+ 'verified': False
+ }
+ }, {
+ 'id': 70175854511,
+ 'platformId': '182919686769_10156515368256770',
+ 'platform': 'Facebook',
+ 'date': '2019-09-08 00:30:08',
+ 'updated': '2019-09-08 00:40:44',
+ 'type': 'link',
+ 'title': 'Trump Using Military Funds To Build 175 Miles Of Border Wall',
+ 'caption': 'dailycaller.com',
+ 'description': "'Slap in the face'",
+ 'message': '🇺🇸 🇺🇸 🇺🇸',
+ 'expandedLinks': [{
+ 'original': 'https://dailycaller.com/2019/09/04/trump-building-174-miles-border-wall/',
+ 'expanded': 'https://dailycaller.com/2019/09/04/trump-building-174-miles-border-wall/'
+ }],
+ 'link': 'https://dailycaller.com/2019/09/04/trump-building-174-miles-border-wall/',
+ 'postUrl': 'https://www.facebook.com/DailyCaller/posts/10156515368256770',
+ 'subscriberCount': 5408115,
+ 'score': 8.655913978494624,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAHAXubROblxiW2&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f17dbae9d20fe10df2a3%2F8c0635ef178086cb0d59e6adcbee7cbcb0b56194_7e72428da38eb398449cb2531f05fc7b5a13068d_facebook&cfs=1&_nc_hash=AQD9iqh3FO7_aK8L',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAsKlewCzcRYkrb&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f17dbae9d20fe10df2a3%2F8c0635ef178086cb0d59e6adcbee7cbcb0b56194_7e72428da38eb398449cb2531f05fc7b5a13068d_facebook&_nc_hash=AQC8ksYE43suLecj'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 551,
+ 'shareCount': 29,
+ 'commentCount': 129,
+ 'loveCount': 88,
+ 'wowCount': 1,
+ 'hahaCount': 3,
+ 'sadCount': 1,
+ 'angryCount': 3,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 18,
+ 'shareCount': 11,
+ 'commentCount': 24,
+ 'loveCount': 4,
+ 'wowCount': 4,
+ 'hahaCount': 17,
+ 'sadCount': 3,
+ 'angryCount': 12,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 13489,
+ 'name': 'The Daily Caller',
+ 'handle': 'DailyCaller',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471',
+ 'subscriberCount': 5408115,
+ 'url': 'https://www.facebook.com/182919686769',
+ 'platform': 'Facebook',
+ 'platformId': '182919686769',
+ 'verified': True
+ }
+ }, {
+ 'id': 70175812390,
+ 'platformId': '354522044588660_3474487762592057',
+ 'platform': 'Facebook',
+ 'date': '2019-09-08 00:00:32',
+ 'updated': '2019-09-08 00:39:40',
+ 'type': 'link',
+ 'title': "Cosplayer's lightsaber attachment for her amputated arm attracts praise from Mark Hamill",
+ 'caption': 'megaphone.upworthy.com',
+ 'description': 'Cosplayer Angel Giuffria and her best friend Trace Wilson created a cool look for San Diego Comic-Con 2019.',
+ 'message': 'Awesome!',
+ 'expandedLinks': [{
+ 'original': 'https://buff.ly/34tgXti',
+ 'expanded': 'https://megaphone.upworthy.com/p/cosplayers-lightsaber-attachment-for-her-amputated-arm'
+ }],
+ 'link': 'https://buff.ly/34tgXti',
+ 'postUrl': 'https://www.facebook.com/Upworthy/posts/3474487762592057',
+ 'subscriberCount': 11752205,
+ 'score': 8.537234042553191,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBPqzN4S_bvpl0m&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d72d2eb595ead5af308f065%2F74ff4746953c774d7e0792890f3f767e79cdcbf8_6a4d91f27330f64461e65fc85106d014d73330f0_facebook&cfs=1&_nc_hash=AQDZ9p6ULaB7D16t',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAGvmrFVvDV0uv2&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d72d2eb595ead5af308f065%2F74ff4746953c774d7e0792890f3f767e79cdcbf8_6a4d91f27330f64461e65fc85106d014d73330f0_facebook&_nc_hash=AQBfrzkG0_ylPd8H'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 1018,
+ 'shareCount': 210,
+ 'commentCount': 37,
+ 'loveCount': 296,
+ 'wowCount': 43,
+ 'hahaCount': 1,
+ 'sadCount': 0,
+ 'angryCount': 0,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 89,
+ 'shareCount': 36,
+ 'commentCount': 14,
+ 'loveCount': 23,
+ 'wowCount': 6,
+ 'hahaCount': 9,
+ 'sadCount': 6,
+ 'angryCount': 5,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 3919,
+ 'name': 'Upworthy',
+ 'handle': 'Upworthy',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1914363_1176320005742189_4709951186905632219_n.png?_nc_cat=1&_nc_oc=AQlPiX5mYxZC_Xj8_M4a7JZZvCD27izvAXTMtobXrLjwA4S5Pel-CsMh5GMouHt8LNg&_nc_ht=scontent.xx&oh=ba4e0db7c2521356dc17108d8aa4a12a&oe=5E04D944',
+ 'subscriberCount': 11752205,
+ 'url': 'https://www.facebook.com/354522044588660',
+ 'platform': 'Facebook',
+ 'platformId': '354522044588660',
+ 'verified': True
+ }
+ }, {
+ 'id': 70162842971,
+ 'platformId': '223649167822693_1260339267487006',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 20:50:47',
+ 'updated': '2019-09-08 00:44:36',
+ 'type': 'link',
+ 'title': 'Shia LaBeouf plays his own father in Honey Boy. He’s phenomenal.',
+ 'caption': 'vox.com',
+ 'description': 'LaBeouf wrote the film based on his own troubled childhood. It’s an exercise in extreme empathy, and a must-see.',
+ 'message': 'LaBeouf wrote the film based on his own troubled childhood. It’s an exercise in extreme empathy, and a must-see.',
+ 'expandedLinks': [{
+ 'original': 'https://www.vox.com/culture/2019/9/7/20852678/honey-boy-review-shia-labeouf-tiff',
+ 'expanded': 'https://www.vox.com/culture/2019/9/7/20852678/honey-boy-review-shia-labeouf-tiff'
+ }],
+ 'link': 'https://www.vox.com/culture/2019/9/7/20852678/honey-boy-review-shia-labeouf-tiff',
+ 'postUrl': 'https://www.facebook.com/Vox/posts/1260339267487006',
+ 'subscriberCount': 2426078,
+ 'score': 8.523364485981308,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQC_nbiaF_fbmuK3&w=600&h=600&url=https%3A%2F%2Fcdn.vox-cdn.com%2Fthumbor%2FlrldWWBAN80k5UXEbmOxanwUTec%3D%2F36x0%3A1182x600%2Ffit-in%2F1200x630%2Fcdn.vox-cdn.com%2Fuploads%2Fchorus_asset%2Ffile%2F19175438%2Fhoneyboy.jpg&cfs=1&sx=260&sy=0&sw=600&sh=600&_nc_hash=AQAKI-eapCdMDK4s',
+ 'height': 600,
+ 'width': 600,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBx2RBl9c23yfdg&url=https%3A%2F%2Fcdn.vox-cdn.com%2Fthumbor%2FlrldWWBAN80k5UXEbmOxanwUTec%3D%2F36x0%3A1182x600%2Ffit-in%2F1200x630%2Fcdn.vox-cdn.com%2Fuploads%2Fchorus_asset%2Ffile%2F19175438%2Fhoneyboy.jpg&_nc_hash=AQDyxNebdOSSGnVR'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 670,
+ 'shareCount': 75,
+ 'commentCount': 68,
+ 'loveCount': 55,
+ 'wowCount': 40,
+ 'hahaCount': 4,
+ 'sadCount': 0,
+ 'angryCount': 0,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 47,
+ 'shareCount': 12,
+ 'commentCount': 14,
+ 'loveCount': 6,
+ 'wowCount': 5,
+ 'hahaCount': 7,
+ 'sadCount': 5,
+ 'angryCount': 11,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 44528,
+ 'name': 'Vox',
+ 'handle': 'Vox',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15327441_612869972233942_727410529402189533_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQnoAo-srh87mkvD-DKqEDzFi4nn14JVBUE8HPqgKgoKz2LtUzKnd7p6NTRpO6WA_Gg&_nc_ht=scontent.xx&oh=ffdab33a30a7adbfde40574c198f8580&oe=5DF8E26D',
+ 'subscriberCount': 2426279,
+ 'url': 'https://www.facebook.com/223649167822693',
+ 'platform': 'Facebook',
+ 'platformId': '223649167822693',
+ 'verified': True
+ }
+ }, {
+ 'id': 70168125844,
+ 'platformId': '86680728811_10158783743663812',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 22:23:31',
+ 'updated': '2019-09-08 00:45:25',
+ 'type': 'link',
+ 'title': "NOAA issues statement supporting Trump's claim Hurricane Dorian threatened Alabama",
+ 'caption': 'abcnews.go.com',
+ 'description': ' ',
+ 'message': "Five days after Donald Trump said Hurricane Dorian threatened Alabama, the National Oceanic and Atmospheric Association issued a statement late Friday supporting the president's claim and chastising a local branch of the National Weather Service.",
+ 'expandedLinks': [{
+ 'original': 'https://abcn.ws/2LxdWiS',
+ 'expanded': 'https://abcnews.go.com/Politics/noaa-issues-statement-supporting-trumps-claim-hurricane-dorian/story?id=65442468&cid=social_fb_abcn'
+ }],
+ 'link': 'https://abcn.ws/2LxdWiS',
+ 'postUrl': 'https://www.facebook.com/ABCNews/posts/10158783743663812',
+ 'subscriberCount': 14195962,
+ 'score': 8.483180428134556,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCTUnaDJll_UUFU&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FInternational%2Fpresident-trump-dorian-map-ap-jef-190904_hpMain_16x9_992.jpg&cfs=1&sx=434&sy=0&sw=558&sh=558&_nc_hash=AQDMQUlXwjLGxsTb',
+ 'height': 558,
+ 'width': 558,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDSxXQ9JfnoeN5B&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FInternational%2Fpresident-trump-dorian-map-ap-jef-190904_hpMain_16x9_992.jpg&_nc_hash=AQB3C2q6MPbeZXRB'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 494,
+ 'shareCount': 298,
+ 'commentCount': 657,
+ 'loveCount': 33,
+ 'wowCount': 56,
+ 'hahaCount': 624,
+ 'sadCount': 40,
+ 'angryCount': 572,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 133,
+ 'shareCount': 63,
+ 'commentCount': 57,
+ 'loveCount': 16,
+ 'wowCount': 17,
+ 'hahaCount': 15,
+ 'sadCount': 16,
+ 'angryCount': 10,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 13878,
+ 'name': 'ABC News',
+ 'handle': 'ABCNews',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02',
+ 'subscriberCount': 14196629,
+ 'url': 'https://www.facebook.com/86680728811',
+ 'platform': 'Facebook',
+ 'platformId': '86680728811',
+ 'verified': True
+ }
+ }, {
+ 'id': 70172118628,
+ 'platformId': '532854420074062_3748578948501577',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 23:30:30',
+ 'updated': '2019-09-08 00:43:19',
+ 'type': 'native_video',
+ 'description': 'Who else drank from the garden hose? 🙋\u200d♀️',
+ 'expandedLinks': [{
+ 'original': 'https://www.facebook.com/thefavesusa/videos/2423928947852297/',
+ 'expanded': 'https://www.facebook.com/thefavesusa/videos/2423928947852297/'
+ }],
+ 'link': 'https://www.facebook.com/thefavesusa/videos/2423928947852297/',
+ 'postUrl': 'https://www.facebook.com/thefavesusa/posts/3748578948501577',
+ 'subscriberCount': 6323442,
+ 'score': 8.475409836065573,
+ 'media': [{
+ 'type': 'video',
+ 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/69386221_369404493728708_8333827521795588096_n.mp4?_nc_cat=106&efg=eyJybHIiOjMwMCwicmxhIjo1MTIsInZlbmNvZGVfdGFnIjoic3ZlX3NkIn0%3D&_nc_oc=AQl8yuqPhwpOYp-3k3lEh631dxvd6HErFyyQQRm5pf026ER0QfOkyIbwHk-bSMmFFJQ&rl=300&vabr=119&_nc_ht=video.xx&oh=1a7e8cda88710bfd6bfa5efe20d5ed37&oe=5D76A62C',
+ 'height': 0,
+ 'width': 0
+ }, {
+ 'type': 'photo',
+ 'url': 'https://scontent.xx.fbcdn.net/v/t15.13418-10/67083314_676082179563971_8068822950096142336_n.jpeg?_nc_cat=111&_nc_oc=AQnEkM29YazhIjlCJEMc16sm3Z-ucihUUjivj0tSs18hFWDWU2hWt2seWIZKG2u7gKE&_nc_ht=scontent.xx&oh=8bb846be002c2d37985d08c40ca21b00&oe=5E0C009F',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://scontent.xx.fbcdn.net/v/t15.13418-10/67083314_676082179563971_8068822950096142336_n.jpeg?_nc_cat=111&_nc_oc=AQnEkM29YazhIjlCJEMc16sm3Z-ucihUUjivj0tSs18hFWDWU2hWt2seWIZKG2u7gKE&_nc_ht=scontent.xx&oh=8bb846be002c2d37985d08c40ca21b00&oe=5E0C009F'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 2766,
+ 'shareCount': 1474,
+ 'commentCount': 163,
+ 'loveCount': 116,
+ 'wowCount': 5,
+ 'hahaCount': 129,
+ 'sadCount': 0,
+ 'angryCount': 0,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 247,
+ 'shareCount': 185,
+ 'commentCount': 47,
+ 'loveCount': 25,
+ 'wowCount': 6,
+ 'hahaCount': 33,
+ 'sadCount': 4,
+ 'angryCount': 2,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 48728,
+ 'name': 'Faves USA',
+ 'handle': 'thefavesusa',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13590243_1529567430402751_5505197343663543097_n.jpg?_nc_cat=1&_nc_oc=AQlqHYa5f3hh3Tu7bwL_7yF5WVkxCnE2WIU8c_5Fs_eMudF84ODKZoLqn8S3lZDdt3g&_nc_ht=scontent.xx&oh=b45134ffcb1aa806ced2cb018887de04&oe=5E0ED98A',
+ 'subscriberCount': 6323373,
+ 'url': 'https://www.facebook.com/532854420074062',
+ 'platform': 'Facebook',
+ 'platformId': '532854420074062',
+ 'verified': True
+ },
+ 'videoLengthMS': 19966
+ }, {
+ 'id': 70161279172,
+ 'platformId': '210277954204_494897387960275',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 20:07:59',
+ 'updated': '2019-09-08 00:39:40',
+ 'type': 'native_video',
+ 'message': 'A Portland Jury clears pair accused of beating up a Trump supporter for "wearing a MAGA hat." Turns out he was trying to start a fight.',
+ 'expandedLinks': [{
+ 'original': 'https://www.facebook.com/TheYoungTurks/videos/494897387960275/',
+ 'expanded': 'https://www.facebook.com/TheYoungTurks/videos/494897387960275/'
+ }],
+ 'link': 'https://www.facebook.com/TheYoungTurks/videos/494897387960275/',
+ 'postUrl': 'https://www.facebook.com/TheYoungTurks/posts/494897387960275',
+ 'subscriberCount': 2099948,
+ 'score': 8.434615384615384,
+ 'media': [{
+ 'type': 'video',
+ 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/70666582_532317314181156_4242284247894720512_n.mp4?_nc_cat=108&efg=eyJybHIiOjMwMCwicmxhIjo1OTksInZlbmNvZGVfdGFnIjoic3ZlX3NkIn0%3D&_nc_oc=AQkidjZHM0f2cKgYgy1J2IaIUNTobts-mBT-vH1MUORQ2TAVP1-t_ndykwSDLNaUdSo&rl=300&vabr=141&_nc_ht=video.xx&oh=5235f9aecbb3ac96f96cfeb798560bac&oe=5D76BA2A',
+ 'height': 0,
+ 'width': 0
+ }, {
+ 'type': 'photo',
+ 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/69260973_631601874030315_3567498504341291008_n.jpg?_nc_cat=102&_nc_oc=AQnzNPRYkA0-eTOU0Omn1s1XaHepBwKgs6WGDui_Wh0CleypMz3UDQd7TX6v9RvfDAM&_nc_ht=scontent.xx&oh=95988a6fca08311254e15dc697a8dbf2&oe=5E10901F',
+ 'height': 360,
+ 'width': 640,
+ 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/69260973_631601874030315_3567498504341291008_n.jpg?_nc_cat=102&_nc_oc=AQnzNPRYkA0-eTOU0Omn1s1XaHepBwKgs6WGDui_Wh0CleypMz3UDQd7TX6v9RvfDAM&_nc_ht=scontent.xx&oh=95988a6fca08311254e15dc697a8dbf2&oe=5E10901F'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 969,
+ 'shareCount': 475,
+ 'commentCount': 186,
+ 'loveCount': 155,
+ 'wowCount': 25,
+ 'hahaCount': 368,
+ 'sadCount': 8,
+ 'angryCount': 7,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 106,
+ 'shareCount': 51,
+ 'commentCount': 41,
+ 'loveCount': 14,
+ 'wowCount': 8,
+ 'hahaCount': 21,
+ 'sadCount': 6,
+ 'angryCount': 13,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 6786,
+ 'name': 'The Young Turks',
+ 'handle': 'TheYoungTurks',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5',
+ 'subscriberCount': 2100186,
+ 'url': 'https://www.facebook.com/210277954204',
+ 'platform': 'Facebook',
+ 'platformId': '210277954204',
+ 'verified': True
+ },
+ 'videoLengthMS': 226601
+ }, {
+ 'id': 70157861820,
+ 'platformId': '266790296879_10157610319761880',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 19:02:02',
+ 'updated': '2019-09-08 00:41:55',
+ 'type': 'link',
+ 'title': 'Opinion | Trump’s on a Path to a One-Term Presidency',
+ 'caption': 'bloomberg.com',
+ 'description': 'To get re-elected, he will need to truly end the trade war.',
+ 'message': "Trump may be following in George H.W. Bush's footsteps without meaning to.",
+ 'expandedLinks': [{
+ 'original': 'https://bloom.bg/2LDuYf4',
+ 'expanded': 'https://www.bloomberg.com/opinion/articles/2019-08-29/how-trump-is-like-george-h-w-bush?utm_content=business&utm_source=facebook&utm_campaign=socialflow-organic&cmpid=socialflow-facebook-business&utm_medium=social'
+ }],
+ 'link': 'https://bloom.bg/2LDuYf4',
+ 'postUrl': 'https://www.facebook.com/bloombergbusiness/posts/10157610319761880',
+ 'subscriberCount': 2955474,
+ 'score': 8.300970873786408,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBG6bcCSWbVI9VT&w=720&h=720&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3687-a50e4c223504f106b77d0e43d433a6e5.jpg&cfs=1&_nc_hash=AQDJXDhZ9O3u2mdA',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCB2w8esj-MwK9d&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3687-a50e4c223504f106b77d0e43d433a6e5.jpg&_nc_hash=AQABfg-6cmYGHxYr'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 330,
+ 'shareCount': 105,
+ 'commentCount': 209,
+ 'loveCount': 104,
+ 'wowCount': 3,
+ 'hahaCount': 102,
+ 'sadCount': 0,
+ 'angryCount': 2,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 40,
+ 'shareCount': 16,
+ 'commentCount': 18,
+ 'loveCount': 3,
+ 'wowCount': 5,
+ 'hahaCount': 10,
+ 'sadCount': 3,
+ 'angryCount': 8,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 10343,
+ 'name': 'Bloomberg',
+ 'handle': 'bloombergbusiness',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31790536_10156383343951880_9143173959372505088_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQm0CmNHVi4wKjfV2xKZ8WmMFbjVnwkn6rwlbqPewk5wTL0Plzu-cY8b0zLOAhS4DLw&_nc_ht=scontent.xx&oh=6eda22b5a7936ec78ea6929b3ed38430&oe=5E1356BD',
+ 'subscriberCount': 2955809,
+ 'url': 'https://www.facebook.com/266790296879',
+ 'platform': 'Facebook',
+ 'platformId': '266790296879',
+ 'verified': True
+ }
+ }, {
+ 'id': 70164120670,
+ 'platformId': '177486166274_10156706635101275',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 21:05:00',
+ 'updated': '2019-09-08 00:42:17',
+ 'type': 'link',
+ 'title': "Mom shoots 'intruder' who turned out to be her daughter surprising her from college",
+ 'caption': 'yahoo.com',
+ 'description': "Late Friday night, the 18-year-old, along with her boyfriend, entered her mother's house in an attempt to surprise her. Her mother thought it was an intruder entering her home.",
+ 'message': "(W) Home of the brave.... When her daughter opened her mom’s bedroom door, the mother fired a single shot from a .38 special, striking her daughter's elbow. It wasn't until the mom fired the gun that she realized it was her daughter.",
+ 'expandedLinks': [{
+ 'original': 'https://www.yahoo.com/lifestyle/mom-shoots-intruder-who-turned-out-to-be-her-daughter-surprising-her-from-college-225514706.html',
+ 'expanded': 'https://www.yahoo.com/lifestyle/mom-shoots-intruder-who-turned-out-to-be-her-daughter-surprising-her-from-college-225514706.html'
+ }],
+ 'link': 'https://www.yahoo.com/lifestyle/mom-shoots-intruder-who-turned-out-to-be-her-daughter-surprising-her-from-college-225514706.html',
+ 'postUrl': 'https://www.facebook.com/beingliberal.org/posts/10156706635101275',
+ 'subscriberCount': 1693698,
+ 'score': 8.274353876739562,
+ 'media': [{
+ 'type': 'photo',
+ 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDOTTId7tCQIL7n&w=720&h=720&url=https%3A%2F%2Fs.yimg.com%2Fuu%2Fapi%2Fres%2F1.2%2FHQfXkS3QwU_I_QWKNwy6tQ--%7EB%2FaD0xMDgwO3c9MTkyMDtzbT0xO2FwcGlkPXl0YWNoeW9u%2Fhttp%3A%2F%2Fmedia.zenfs.com%2Fen-US%2Fvideo%2Fwjw_cleveland_686%2Fcdb39c35e380bad83d6543cf2498bdde&cfs=1&_nc_hash=AQAxnw1IYaZgszl1',
+ 'height': 720,
+ 'width': 720,
+ 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCKeO31cDOTHyer&url=https%3A%2F%2Fs.yimg.com%2Fuu%2Fapi%2Fres%2F1.2%2FHQfXkS3QwU_I_QWKNwy6tQ--%7EB%2FaD0xMDgwO3c9MTkyMDtzbT0xO2FwcGlkPXl0YWNoeW9u%2Fhttp%3A%2F%2Fmedia.zenfs.com%2Fen-US%2Fvideo%2Fwjw_cleveland_686%2Fcdb39c35e380bad83d6543cf2498bdde&_nc_hash=AQD4QM-W36TBTH1w'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 260,
+ 'shareCount': 1341,
+ 'commentCount': 528,
+ 'loveCount': 5,
+ 'wowCount': 489,
+ 'hahaCount': 115,
+ 'sadCount': 892,
+ 'angryCount': 532,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 151,
+ 'shareCount': 137,
+ 'commentCount': 83,
+ 'loveCount': 18,
+ 'wowCount': 19,
+ 'hahaCount': 34,
+ 'sadCount': 11,
+ 'angryCount': 50,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 5860,
+ 'name': 'Being Liberal',
+ 'handle': 'beingliberal.org',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16865169_10154418564961275_3050958479071030073_n.png?_nc_cat=1&_nc_oc=AQlPF5wIrIXWCeRBPDA5P17NqQMaux6LCm9Ak8V6ktaSHP0ajoY7MreFOF-RleH_5sQ&_nc_ht=scontent.xx&oh=39015e43af0ae9881035d6aa4a9fe5fc&oe=5E0A093D',
+ 'subscriberCount': 1693705,
+ 'url': 'https://www.facebook.com/177486166274',
+ 'platform': 'Facebook',
+ 'platformId': '177486166274',
+ 'verified': True
+ }
+ }, {
+ 'id': 70164191280,
+ 'platformId': '7292655492_396666331043489',
+ 'platform': 'Facebook',
+ 'date': '2019-09-07 21:08:45',
+ 'updated': '2019-09-08 00:43:45',
+ 'type': 'live_video_complete',
+ 'message': 'Hear the plan to proceed with the impeachment inquiry against Donald Trump! Rep. Pramila Jayapal and Rep. Jamie Raskin share their blueprint to hold Trump accountable for his crimes as the House Judiciary Committee begins impeachment proceedings. (via act.tv)',
+ 'expandedLinks': [{
+ 'original': 'https://www.facebook.com/moveon/videos/396666331043489/',
+ 'expanded': 'https://www.facebook.com/moveon/videos/396666331043489/'
+ }],
+ 'link': 'https://www.facebook.com/moveon/videos/396666331043489/',
+ 'postUrl': 'https://www.facebook.com/moveon/posts/396666331043489',
+ 'subscriberCount': 1654145,
+ 'score': 8.243869209809265,
+ 'media': [{
+ 'type': 'video',
+ 'url': 'https://video.xx.fbcdn.net/v/t42.1790-2/10000000_384414495807916_8369269378861398547_n.mp4?_nc_cat=109&vs=b1c91073f726db69&_nc_vs=HBksFQAYJEdJQ1dtQUNzR1hoNW4xMEJBQk42bG1Rb25pVjBidjRHQUFBRhUAABUAGCRHSUNXbUFBb0dwZGxIRGdDQUxaOTVyM0FRY1k4YnY0R0FBQUYVAgAoRC1pICclcycgLWZiX3VzZV90ZmR0X3N0YXJ0dGltZSAxIC1pICclcycgLWMgY29weSAtbW92ZmxhZ3MgZmFzdHN0YXJ0KwGIEnByb2dyZXNzaXZlX3JlY2lwZQExFQAlABwAABgKMTAzNTc0NTI3NxbEgMiygrG0ARXGDxkFGAJDMxgDYXYxHBdAu7PybpeNUBgZZGFzaF9saXZlX21kX2ZyYWdfMl92aWRlbxIAGBh2aWRlb3MudnRzLmNhbGxiYWNrLnByb2QZHBUAFYqfAwAoElZJREVPX1ZJRVdfUkVRVUVTVBsGiBVvZW1fdGFyZ2V0X2VuY29kZV90YWcGb2VwX3NkE29lbV9yZXF1ZXN0X3RpbWVfbXMNMTU2Nzg5NzY4Njc1NAxvZW1fY2ZnX3J1bGUSd2FzbGl2ZV9zZF90cmltbWVkDG9lbV92aWRlb19pZA8zOTY2NzAxNDQzNzY0NDESb2VtX3ZpZGVvX2Fzc2V0X2lkDzM5NjY2NjMyNDM3NjgyMxVvZW1fdmlkZW9fcmVzb3VyY2VfaWQPMzk2NjY2MzIxMDQzNDkwJQQcHBwVgLUYGwFVAJKVARsBVQD06gEcFQIAAAAAAA%3D%3D&efg=eyJ2ZW5jb2RlX3RhZyI6Im9lcF9zZCJ9&_nc_log=1&_nc_oc=AQn6cMan5-t8T_lfzD75KzZyHqeMamHDTQHAvb_GPdVKeVB-V0EcB49vddi0kVoi78c&_nc_ht=video.xx&oh=f59a23e608d9d22929afc11c1da23757&oe=5D76966C&_nc_rid=efa046a5126147e',
+ 'height': 0,
+ 'width': 0
+ }, {
+ 'type': 'photo',
+ 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/s720x720/68874404_396668534376602_4308557968388915200_n.jpg?_nc_cat=106&_nc_log=1&_nc_oc=AQmMVLY5zqQURDB2TKO1zooHpKwvzND4lK84W6kB-Rtq8iKO3maR8xP9pLHxIUZSD00&_nc_ht=scontent.xx&oh=b212bce3c0fb77ad0529d301f5e8c6fb&oe=5E1167E5',
+ 'height': 405,
+ 'width': 720,
+ 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/68874404_396668534376602_4308557968388915200_n.jpg?_nc_cat=106&_nc_oc=AQmMVLY5zqQURDB2TKO1zooHpKwvzND4lK84W6kB-Rtq8iKO3maR8xP9pLHxIUZSD00&_nc_ht=scontent.xx&oh=7170dece3f1407f19873cf1dbee215b5&oe=5E14EAAD'
+ }],
+ 'statistics': {
+ 'actual': {
+ 'likeCount': 1026,
+ 'shareCount': 789,
+ 'commentCount': 3268,
+ 'loveCount': 566,
+ 'wowCount': 17,
+ 'hahaCount': 195,
+ 'sadCount': 10,
+ 'angryCount': 180,
+ 'thankfulCount': 0
+ },
+ 'expected': {
+ 'likeCount': 289,
+ 'shareCount': 119,
+ 'commentCount': 194,
+ 'loveCount': 105,
+ 'wowCount': 4,
+ 'hahaCount': 6,
+ 'sadCount': 9,
+ 'angryCount': 8,
+ 'thankfulCount': 0
+ }
+ },
+ 'account': {
+ 'id': 3832,
+ 'name': 'MoveOn',
+ 'handle': 'moveon',
+ 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/31206661_10155375246245493_295037061581229251_n.png?_nc_cat=1&_nc_oc=AQlSE1FqdCbaeopNV1yaNtJ3CmFLidqKES5CzQDuKERpCBGKUk_e3fO242Wi3KvNKSE&_nc_ht=scontent.xx&oh=ca9e5b7aef01fe823dc1929cfd53827d&oe=5E10EEAD',
+ 'subscriberCount': 1654130,
+ 'url': 'https://www.facebook.com/7292655492',
+ 'platform': 'Facebook',
+ 'platformId': '7292655492',
+ 'verified': True
+ },
+ 'videoLengthMS': 7091947,
+ 'liveVideoStatus': 'completed'
+ }, {
+ 'id': 70174258568,
+ 'platformId': '20446254070_10156890840439071',
+ 'platform': 'Facebook',
+ 'date': '2019-09-08 00:00:29',
+ 'updated': '2019-09-08 00:42:26',
+ 'type': 'link',
+ 'title': 'Trump says he invited Taliban leaders to Camp David for a secret meeting, but canceled because of a recent attack that killed a US soldier',
+ 'caption': 'businessinsider.com',
+ 'description': '"The major Taliban
leaders and, separately, the President of Afghanistan, were going to secretly meet with me at Camp David on Sunday," Trump tweeted.', + 'message': "Trump said he's also called off all peace negotiations with the Taliban.", + 'expandedLinks': [{ + 'original': 'https://bit.ly/2PXgMDl', + 'expanded': 'https://www.businessinsider.com/trump-canceled-secret-camp-david-meeting-with-taliban-leaders-2019-9?utm_content=buffer324e2&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer-bi' + }], + 'link': 'https://bit.ly/2PXgMDl', + 'postUrl': 'https://www.facebook.com/businessinsider/posts/10156890840439071', + 'subscriberCount': 9107012, + 'score': 8.193548387096774, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAuUoFSrIOPEUbT&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d7443d4665c02241436f629%2Fc9e2a350ed76b667887bd8bc901046ef19df8a7d_6480a23340bd0fff02ebb6fa6d654bc5bee2c290_facebook&cfs=1&_nc_hash=AQC1rMqJsyRwHoJG', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBkk8ywT_dgoUXY&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d7443d4665c02241436f629%2Fc9e2a350ed76b667887bd8bc901046ef19df8a7d_6480a23340bd0fff02ebb6fa6d654bc5bee2c290_facebook&_nc_hash=AQClYoSFI8eIT1VN' + }], + 'statistics': { + 'actual': { + 'likeCount': 35, + 'shareCount': 26, + 'commentCount': 96, + 'loveCount': 1, + 'wowCount': 9, + 'hahaCount': 60, + 'sadCount': 1, + 'angryCount': 26, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 9, + 'shareCount': 4, + 'commentCount': 4, + 'loveCount': 2, + 'wowCount': 2, + 'hahaCount': 4, + 'sadCount': 3, + 'angryCount': 3, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6648, + 'name': 'Business Insider', + 'handle': 'businessinsider', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9', + 'subscriberCount': 9107575, + 'url': 'https://www.facebook.com/20446254070', + 'platform': 'Facebook', + 'platformId': '20446254070', + 'verified': True + } + }, { + 'id': 70157398315, + 'platformId': '155869377766434_3572569126096425', + 'platform': 'Facebook', + 'date': '2019-09-07 18:52:08', + 'updated': '2019-09-07 23:00:35', + 'type': 'link', + 'title': 'Thousands listed as missing in Bahamas in Hurricane Dorian’s wake', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'Thousands of people are desperately trying to find loved ones in the Bahamas.', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/34xqqzs', + 'expanded': 'https://www.nbcnews.com/news/world/thousands-listed-missing-bahamas-hurricane-dorian-s-wake-n1050791?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/34xqqzs', + 'postUrl': 'https://www.facebook.com/NBCNews/posts/3572569126096425', + 'subscriberCount': 9970622, + 'score': 8.153543307086615, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD6stbq-rFP7Jda&w=720&h=720&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&cfs=1&_nc_hash=AQBVY5Go-4zF-tlS', + 'height': 720, + 'width': 720, + 'full': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQCocMqpL-yoqFsO&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&_nc_hash=AQDssZadqERvIDEf' + }], + 'statistics': { + 'actual': { + 'likeCount': 127, + 'shareCount': 567, + 'commentCount': 95, + 'loveCount': 0, + 'wowCount': 62, + 'hahaCount': 1, + 'sadCount': 1214, + 'angryCount': 5, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 61, + 'shareCount': 50, + 'commentCount': 54, + 'loveCount': 10, + 'wowCount': 19, + 'hahaCount': 19, + 'sadCount': 21, + 'angryCount': 20, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13889, + 'name': 'NBC News', + 'handle': 'NBCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653', + 'subscriberCount': 9970540, + 'url': 'https://www.facebook.com/155869377766434', + 'platform': 'Facebook', + 'platformId': '155869377766434', + 'verified': True + } + }, { + 'id': 70172968858, + 'platformId': '1830665590513511_2501693993410664', + 'platform': 'Facebook', + 'date': '2019-09-07 23:35:59', + 'updated': '2019-09-08 00:41:20', + 'type': 'link', + 'title': "Trump says he's called off secret Taliban meeting at Camp David over Afghanistan bombing", + 'caption': 'axios.com', + 'description': 'They were "trying to build false leverage" with the bombing, he tweeted.', + 'message': 'President Trump tweets: "Unbeknownst to almost everyone, the major Taliban leaders and, separately, the President of Afghanistan, were going to secretly meet with me at Camp David on Sunday. They were coming to the United States tonight. Unfortunately, in order to build false leverage, they admitted to an attack in Kabul that killed one of our great great soldiers, and 11 other people. 
I immediately cancelled the meeting and called off peace negotiations."', + 'expandedLinks': [{ + 'original': 'https://www.axios.com/kabul-bombing-trump-tweets-he-called-off-taliban-talks-73d1b998-375c-4e09-9a4c-6c2355bd2019.html?utm_source=facebook&utm_medium=social&utm_campaign=onhrs', + 'expanded': 'https://www.axios.com/kabul-bombing-trump-tweets-he-called-off-taliban-talks-73d1b998-375c-4e09-9a4c-6c2355bd2019.html?utm_source=facebook&utm_medium=social&utm_campaign=onhrs' + }], + 'link': 'https://www.axios.com/kabul-bombing-trump-tweets-he-called-off-taliban-talks-73d1b998-375c-4e09-9a4c-6c2355bd2019.html?utm_source=facebook&utm_medium=social&utm_campaign=onhrs', + 'postUrl': 'https://www.facebook.com/axiosnews/posts/2501693993410664', + 'subscriberCount': 339339, + 'score': 8.062176165803109, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDkia2HrmsaC2WF&w=720&h=720&url=https%3A%2F%2Fimages.axios.com%2FmVrpxqNy0PvpzRVMzYcFpeqxy8s%3D%2F0x0%3A5871x3302%2F1920x1080%2F2019%2F09%2F07%2F1567898519236.jpg&cfs=1&sx=505&sy=0&sw=1080&sh=1080&_nc_hash=AQCciPKG-QM_Xd3G', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCtLVStevGG0CNY&url=https%3A%2F%2Fimages.axios.com%2FmVrpxqNy0PvpzRVMzYcFpeqxy8s%3D%2F0x0%3A5871x3302%2F1920x1080%2F2019%2F09%2F07%2F1567898519236.jpg&_nc_hash=AQAOf6U399snb_jC' + }], + 'statistics': { + 'actual': { + 'likeCount': 133, + 'shareCount': 276, + 'commentCount': 521, + 'loveCount': 10, + 'wowCount': 60, + 'hahaCount': 399, + 'sadCount': 7, + 'angryCount': 150, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 61, + 'shareCount': 38, + 'commentCount': 32, + 'loveCount': 9, + 'wowCount': 8, + 'hahaCount': 15, + 'sadCount': 10, + 'angryCount': 20, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 1431632, + 'name': 'Axios', + 'handle': 'axiosnews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46844445_2289864377926961_9207563348864925696_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQncZ-V-nWa7ihCtPUY2OE7NX8kzbdrK9hiEMhqNa6qBeOKkh3VKYYgS2lvKd-xjZnI&_nc_ht=scontent.xx&oh=3fa348414b7b9cfcabc2cd5bc93789f4&oe=5E0F6422', + 'subscriberCount': 339358, + 'url': 'https://www.facebook.com/1830665590513511', + 'platform': 'Facebook', + 'platformId': '1830665590513511', + 'verified': True + } + }, { + 'id': 70159083274, + 'platformId': '182919686769_10156514740791770', + 'platform': 'Facebook', + 'date': '2019-09-07 19:30:03', + 'updated': '2019-09-08 00:40:44', + 'type': 'link', + 'title': 'Bernie Sanders Trots Out Linda Sarsour As A Campaign Surrogate', + 'caption': 'dailycaller.com', + 'description': 'Sen. 
Bernie Sanders enlisted the services of far-left activist Linda Sarsour to campaign for him Tuesday.', + 'message': '👀', + 'expandedLinks': [{ + 'original': 'https://dailycaller.com/2019/09/07/bernie-sanders-linda-sarsour-jewish-israel/', + 'expanded': 'https://dailycaller.com/2019/09/07/bernie-sanders-linda-sarsour-jewish-israel/' + }], + 'link': 'https://dailycaller.com/2019/09/07/bernie-sanders-linda-sarsour-jewish-israel/', + 'postUrl': 'https://www.facebook.com/DailyCaller/posts/10156514740791770', + 'subscriberCount': 5408428, + 'score': 7.962800875273523, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQACjaoDOCXkFkcN&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dcf19809dd005d498638%2F5c68b2a57cab7e35a5e8f5b773152a5e109394bd_22df6d5faf11f33cc50e308c256b76e777c5ec58_facebook&cfs=1&_nc_hash=AQBwIFadr-2O_lob', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDuj5H_nVm_5Yrb&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dcf19809dd005d498638%2F5c68b2a57cab7e35a5e8f5b773152a5e109394bd_22df6d5faf11f33cc50e308c256b76e777c5ec58_facebook&_nc_hash=AQCBL_eis9doenDw' + }], + 'statistics': { + 'actual': { + 'likeCount': 151, + 'shareCount': 741, + 'commentCount': 939, + 'loveCount': 1, + 'wowCount': 179, + 'hahaCount': 510, + 'sadCount': 57, + 'angryCount': 1061, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 142, + 'shareCount': 65, + 'commentCount': 128, + 'loveCount': 10, + 'wowCount': 17, + 'hahaCount': 48, + 'sadCount': 8, + 'angryCount': 39, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13489, + 'name': 'The Daily Caller', + 'handle': 'DailyCaller', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471', + 'subscriberCount': 5408115, + 'url': 'https://www.facebook.com/182919686769', + 'platform': 'Facebook', + 'platformId': '182919686769', + 'verified': True + } + }, { + 'id': 70172724809, + 'platformId': '319569361390023_3033649303315335', + 'platform': 'Facebook', + 'date': '2019-09-07 23:30:05', + 'updated': '2019-09-08 00:33:02', + 'type': 'link', + 'title': "David Hogg: White people feel it's American to pick up of gun because of their xenophobia", + 'caption': 'twitchy.com', + 'description': "It's always good to hear David Hogg and Chris Hayes discuss white gun owners.", + 'message': 'Maybe some white people don\'t want to "explore" being robbed or raped.', + 'expandedLinks': [{ + 'original': 'https://twitchy.com/brettt-3136/2019/09/07/david-hogg-white-people-feel-its-american-to-pick-up-of-gun-because-of-their-xenophobia/?utm_content=buffer6a4a5&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer', + 'expanded': 'https://twitchy.com/brettt-3136/2019/09/07/david-hogg-white-people-feel-its-american-to-pick-up-of-gun-because-of-their-xenophobia/?utm_content=buffer6a4a5&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer' + }], + 'link': 'https://twitchy.com/brettt-3136/2019/09/07/david-hogg-white-people-feel-its-american-to-pick-up-of-gun-because-of-their-xenophobia/?utm_content=buffer6a4a5&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer', + 'postUrl': 'https://www.facebook.com/TeamTwitchy/posts/3033649303315335', + 'subscriberCount': 1230972, + 'score': 7.857142857142857, + 'media': [{ + 
'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQB7bT4nl7lkv-Df&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d743818b7ba8c1f095735f2%2F13d92b6c4918c5e371fb73d839e5ebf767c467fa_02cfbdaa9db52496964c2f8d6d16b3195a9de7a6_facebook&cfs=1&_nc_hash=AQCxNnhZKprHuha9', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2zIJAVELVhdO7&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d743818b7ba8c1f095735f2%2F13d92b6c4918c5e371fb73d839e5ebf767c467fa_02cfbdaa9db52496964c2f8d6d16b3195a9de7a6_facebook&_nc_hash=AQAuc9IHR5rV3WRK' + }], + 'statistics': { + 'actual': { + 'likeCount': 23, + 'shareCount': 27, + 'commentCount': 206, + 'loveCount': 1, + 'wowCount': 3, + 'hahaCount': 85, + 'sadCount': 2, + 'angryCount': 38, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 8, + 'shareCount': 5, + 'commentCount': 10, + 'loveCount': 2, + 'wowCount': 3, + 'hahaCount': 12, + 'sadCount': 2, + 'angryCount': 7, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13491, + 'name': 'Twitchy', + 'handle': 'TeamTwitchy', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1510706_1291332407547042_3464451234671669539_n.jpg?_nc_cat=104&_nc_oc=AQlx0XkhJRril3GnGFgvw8EB7HLdxYW1VUJdclk_vtkSyD2YU5sgdJRr3b2u_ybasgg&_nc_ht=scontent.xx&oh=4c3a5afcfd13d7b38b6853399aa44529&oe=5E0B1BA3', + 'subscriberCount': 1230881, + 'url': 'https://www.facebook.com/319569361390023', + 'platform': 'Facebook', + 'platformId': '319569361390023', + 'verified': True + } + }, { + 'id': 70174473556, + 'platformId': '134486075205_10163969337575206', + 'platform': 'Facebook', + 'date': '2019-09-08 00:06:06', + 'updated': '2019-09-08 00:43:59', + 'type': 'link', + 'title': 'AOC demands Trump’s impeachment amid probe into his Scottish resort', + 'caption': 'nypost.com', + 'description': 'Rep. Alexandria Ocasio-Cortez has issued a fresh call to impeach President Trump amid reports of a House investigation into a military stop at his Scottish golf club. 
“The President is corrupt and …', + 'message': '“The President is corrupt and must be impeached,” the freshman Democrat posted on Twitter Friday.', + 'expandedLinks': [{ + 'original': 'https://trib.al/645cHyB', + 'expanded': 'https://nypost.com/2019/09/07/aoc-demands-trumps-impeachment-amid-probe-into-his-scottish-resort/?sr_share=facebook&utm_source=NYPFacebook&utm_medium=SocialFlow&utm_campaign=SocialFlow' + }], + 'link': 'https://trib.al/645cHyB', + 'postUrl': 'https://www.facebook.com/NYPost/posts/10163969337575206', + 'subscriberCount': 4182920, + 'score': 7.796610169491525, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDn49MSINxbWE43&w=720&h=720&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2019%2F09%2Faoc-demands-trump-impeachment.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&cfs=1&_nc_hash=AQBVKLCAMMKs3Qot', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCDfTkvw58OC2Sh&url=https%3A%2F%2Fthenypost.files.wordpress.com%2F2019%2F09%2Faoc-demands-trump-impeachment.jpg%3Fquality%3D90%26strip%3Dall%26w%3D1200&_nc_hash=AQCPwMe3FJbBiyK5' + }], + 'statistics': { + 'actual': { + 'likeCount': 231, + 'shareCount': 94, + 'commentCount': 259, + 'loveCount': 51, + 'wowCount': 4, + 'hahaCount': 255, + 'sadCount': 0, + 'angryCount': 26, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 28, + 'shareCount': 22, + 'commentCount': 30, + 'loveCount': 3, + 'wowCount': 8, + 'hahaCount': 14, + 'sadCount': 6, + 'angryCount': 7, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 10342, + 'name': 'New York Post', + 'handle': 'NYPost', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/12932928_10157483552025206_1176575955706691041_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQnPmbZuC7S1v1NTPRZ7rWQU4EucwAW3nKx-aXD0PzlPsD3ifQpdaLcXEegH730Wy_o&_nc_ht=scontent.xx&oh=c77d86309611fa2972df1979bf6cab9e&oe=5E0827CA', + 'subscriberCount': 4183079, + 'url': 'https://www.facebook.com/134486075205', + 'platform': 'Facebook', + 'platformId': '134486075205', + 'verified': True + } + }, { + 'id': 70166864447, + 'platformId': '268914272540_10156464324502541', + 'platform': 'Facebook', + 'date': '2019-09-07 22:00:07', + 'updated': '2019-09-08 00:30:46', + 'type': 'link', + 'title': 'Undocumented man who cooperated in case against Jamaican kingpin Christopher ‘Dudus’ Coke fights deadly deportation', + 'caption': 'nydailynews.com', + 'description': ' ', + 'message': 'An undocumented man who helped the government bring down one of Jamaica’s most notorious drug kingpins faces a deportation death sentence. 
The immigrant, identified in court papers only as Sean B., was a cooperating witness in the case against Christopher “Dudus” Coke in 2011, which made him a marked man.', + 'expandedLinks': [{ + 'original': 'https://trib.al/awiaROM', + 'expanded': 'https://www.nydailynews.com/new-york/ny-coke-cooperator-deportation-20190907-2bgpnpfbpzearlsg4z6raexrye-story.html' + }], + 'link': 'https://trib.al/awiaROM', + 'postUrl': 'https://www.facebook.com/NYDailyNews/posts/10156464324502541', + 'subscriberCount': 3119682, + 'score': 7.7164179104477615, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCb_uyXtgvEH8YO&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2FzwNc3qlGv_FRTNABd4zyhmfutEU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FG27YK3ENX5FAXIEFQ7D3DHWYNI.jpg&cfs=1&sx=154&sy=0&sw=869&sh=869&_nc_hash=AQDIB2IEwrFbBQfH', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD8pD_gt7qTTxhB&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2FzwNc3qlGv_FRTNABd4zyhmfutEU%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2FG27YK3ENX5FAXIEFQ7D3DHWYNI.jpg&_nc_hash=AQDdkbiscfR0BlCW' + }], + 'statistics': { + 'actual': { + 'likeCount': 114, + 'shareCount': 456, + 'commentCount': 128, + 'loveCount': 2, + 'wowCount': 103, + 'hahaCount': 23, + 'sadCount': 23, + 'angryCount': 185, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 33, + 'shareCount': 30, + 'commentCount': 23, + 'loveCount': 4, + 'wowCount': 10, + 'hahaCount': 14, + 'sadCount': 6, + 'angryCount': 14, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 18752, + 'name': 'New York Daily News', + 'handle': 'NYDailyNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1', + 'subscriberCount': 3120017, + 'url': 'https://www.facebook.com/268914272540', + 'platform': 'Facebook', + 'platformId': '268914272540', + 'verified': True + } + }, { + 'id': 70160607290, + 'platformId': '20446254070_10156890348774071', + 'platform': 'Facebook', + 'date': '2019-09-07 20:05:01', + 'updated': '2019-09-07 23:47:08', + 'type': 'link', + 'title': 'A vegan sued her neighbors for cooking meat in their backyard, and now thousands are planning a barbecue just to annoy her', + 'caption': 'insider.com', + 'description': 'Cilla Carden, a vegan from Perth, Australia, recently sued her neighbors for having barbecues in their backyard, among other complaints.', + 'message': "The courts dismissed her case and denied her appeal, but Carden said she wouldn't stop fighting.", + 'expandedLinks': [{ + 'original': 'https://bit.ly/2HRRtMt', + 'expanded': 'https://www.insider.com/vegan-sued-neighbors-cooking-meat-in-their-backyard-2019-9?utm_content=buffer3311a&utm_medium=social&utm_source=facebook.com&utm_campaign=buffer-bi' + }], + 'link': 'https://bit.ly/2HRRtMt', + 'postUrl': 'https://www.facebook.com/businessinsider/posts/10156890348774071', + 'subscriberCount': 9107012, + 'score': 7.408163265306122, + 'media': [{ + 'type': 'photo', + 'url': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQDLlBBaIbf2Ofn8&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d727375508525086669bc32%2F23f2f8b77ee25719c0d7fbda1cd415481f2fc4ac_e87e2781797f5cbd3c789c90679fc3b18e6ad997_facebook&cfs=1&_nc_hash=AQAdosUrYP86btQh', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAdBjnZNFZjH3HL&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d727375508525086669bc32%2F23f2f8b77ee25719c0d7fbda1cd415481f2fc4ac_e87e2781797f5cbd3c789c90679fc3b18e6ad997_facebook&_nc_hash=AQAd_QWYfBovdu0G' + }], + 'statistics': { + 'actual': { + 'likeCount': 61, + 'shareCount': 83, + 'commentCount': 51, + 'loveCount': 6, + 'wowCount': 15, + 'hahaCount': 144, + 'sadCount': 0, + 'angryCount': 3, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 12, + 'shareCount': 6, + 'commentCount': 6, + 'loveCount': 3, + 'wowCount': 3, + 'hahaCount': 7, + 'sadCount': 5, + 'angryCount': 7, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6648, + 'name': 'Business Insider', + 'handle': 'businessinsider', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/20140008_10154867513079071_8190657407315988923_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkI55CBCj4kJdip-PX9AJ_S4mxJ5XQ4nlum3ikySzQgBRQCJSXsyjHW-8w8qPH2aX4&_nc_ht=scontent.xx&oh=4d024551fc98af700d89602c6980c3c0&oe=5E155CB9', + 'subscriberCount': 9107575, + 'url': 'https://www.facebook.com/20446254070', + 'platform': 'Facebook', + 'platformId': '20446254070', + 'verified': True + } + }, { + 'id': 70160757733, + 'platformId': '21516776437_10157334747216438', + 'platform': 'Facebook', + 'date': '2019-09-07 20:00:15', + 'updated': '2019-09-08 00:43:36', + 'type': 'link', + 'title': 'Trump Lost #Sharpiegate the Moment He Took It Seriously', + 'caption': 'slate.com', + 'description': 'There may be a limit to bending reality.', + 'message': 'Not even Fox News is going along with it.', + 'expandedLinks': [{ + 'original': 'https://slate.trib.al/gim6IVr', + 'expanded': 'https://slate.com/news-and-politics/2019/09/trumps-sharpie-defense-hurricane-dorian.html?via=rss_socialflow_facebook' + }], + 'link': 'https://slate.trib.al/gim6IVr', + 'postUrl': 'https://www.facebook.com/Slate/posts/10157334747216438', + 'subscriberCount': 1518914, + 'score': 7.38, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCq3vaUbv_4TMBr&w=720&h=720&url=https%3A%2F%2Fcompote.slate.com%2Fimages%2F4fe737a0-b801-46af-a378-286301f8b58f.jpeg%3Fwidth%3D780%26height%3D520%26rect%3D1248x832%26offset%3D0x0&cfs=1&_nc_hash=AQDS81aJDFJLV-Al', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBzEKXqdQ0KNLMY&url=https%3A%2F%2Fcompote.slate.com%2Fimages%2F4fe737a0-b801-46af-a378-286301f8b58f.jpeg%3Fwidth%3D780%26height%3D520%26rect%3D1248x832%26offset%3D0x0&_nc_hash=AQCwx4_3HgJfmfIR' + }], + 'statistics': { + 'actual': { + 'likeCount': 162, + 'shareCount': 44, + 'commentCount': 28, + 'loveCount': 11, + 'wowCount': 2, + 'hahaCount': 118, + 'sadCount': 1, + 'angryCount': 3, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 10, + 'shareCount': 6, + 'commentCount': 8, + 'loveCount': 3, + 'wowCount': 3, + 'hahaCount': 4, + 'sadCount': 5, + 'angryCount': 11, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6631, + 'name': 'Slate.com', + 'handle': 'Slate', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/26815412_10155867835401438_6786592847511925697_n.jpg?_nc_cat=1&_nc_oc=AQnlPqxpF8HJHZLBBP9M3JCvr7KRojNU13Gek2aIDlLStNh3FwBSADznEiZCEG1_doE&_nc_ht=scontent.xx&oh=fa5bf2320fbcba9484de00ac7f908e6c&oe=5DC8F5CA', + 'subscriberCount': 1518896, + 'url': 'https://www.facebook.com/21516776437', + 'platform': 'Facebook', + 'platformId': '21516776437', + 'verified': True + } + }, { + 'id': 70175280754, + 'platformId': '25987609066_10156789085049067', + 'platform': 'Facebook', + 'date': '2019-09-08 00:05:04', + 'updated': '2019-09-08 00:26:32', + 'type': 'link', + 'title': 'Trump says he was about to hold secret talks with the Taliban in the US, but canceled them', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'BREAKING: Days ahead of 9/11 anniversary, Pres. Trump announces that he was set to hold secret talks with the Taliban at Camp David in the US this weekend but he has called off the talks after a US service member was killed in a suicide attack in Kabul. https://trib.al/VYsjQl1', + 'expandedLinks': [{ + 'original': 'https://trib.al/VYsjQl1', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141' + }, { + 'original': 'https://trib.al/VYsjQl1', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141' + }], + 'link': 'https://trib.al/VYsjQl1', + 'postUrl': 'https://www.facebook.com/therachelmaddowshow/posts/10156789085049067', + 'subscriberCount': 2643600, + 'score': 7.22234762979684, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 228, + 'shareCount': 1043, + 'commentCount': 1616, + 'loveCount': 2, + 'wowCount': 578, + 'hahaCount': 1124, + 'sadCount': 63, + 'angryCount': 1745, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 221, + 'shareCount': 184, + 'commentCount': 118, + 'loveCount': 10, + 'wowCount': 31, + 'hahaCount': 14, + 'sadCount': 43, + 'angryCount': 265, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 3921, + 'name': 'The Rachel Maddow Show', + 'handle': 'therachelmaddowshow', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/560412_10150641324209067_326441500_n.jpg?_nc_cat=1&_nc_oc=AQll8voihNLTZqhNJxHo54RezqGbTpA2ADMeAJ0m1c--3__ynoI3yGrzvpSMzT6QrNI&_nc_ht=scontent.xx&oh=8f8d327dc4a47e1af85f9d3da82d4eb3&oe=5DFB0DA7', + 'subscriberCount': 2643600, + 'url': 'https://www.facebook.com/25987609066', + 'platform': 'Facebook', + 'platformId': '25987609066', + 'verified': True + } + }, { + 'id': 70161490237, + 'platformId': '86680728811_10158783373048812', + 'platform': 'Facebook', + 'date': '2019-09-07 20:10:39', + 'updated': '2019-09-08 00:45:25', + 'type': 'link', + 'title': '241 NYPD officers have died from 9/11 illnesses, 10 times the number killed at WTC', + 'caption': 'abcnews.go.com', + 'description': ' ', + 'message': 'To date, 
241 members of the NYPD died of 9/11-related illnesses – compared to the 23 killed in the attack on the World Trade Center.', + 'expandedLinks': [{ + 'original': 'https://abcn.ws/34uNH5d', + 'expanded': 'https://abcnews.go.com/US/241-nypd-officers-died-911-illnesses-10-times/story?id=65430201&cid=social_fb_abcn' + }], + 'link': 'https://abcn.ws/34uNH5d', + 'postUrl': 'https://www.facebook.com/ABCNews/posts/10158783373048812', + 'subscriberCount': 14195962, + 'score': 7.180878552971576, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fnypd-ceremonial-wall-gty-jc-190906_hpMain_16x9_992.jpg&cfs=1&sx=188&sy=0&sw=558&sh=558&_nc_hash=AQDss5F9oj_ddJeI', + 'height': 558, + 'width': 558, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBfjZ1NKJj5a-AY&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fnypd-ceremonial-wall-gty-jc-190906_hpMain_16x9_992.jpg&_nc_hash=AQBQPg2M4kC-Vv9c' + }], + 'statistics': { + 'actual': { + 'likeCount': 254, + 'shareCount': 925, + 'commentCount': 104, + 'loveCount': 9, + 'wowCount': 120, + 'hahaCount': 0, + 'sadCount': 1324, + 'angryCount': 43, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 161, + 'shareCount': 77, + 'commentCount': 64, + 'loveCount': 20, + 'wowCount': 20, + 'hahaCount': 17, + 'sadCount': 17, + 'angryCount': 11, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13878, + 'name': 'ABC News', + 'handle': 'ABCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02', + 'subscriberCount': 14196629, + 'url': 'https://www.facebook.com/86680728811', + 'platform': 'Facebook', + 'platformId': '86680728811', + 'verified': True + } + }, { + 'id': 70166864446, + 'platformId': '268914272540_10156464339042541', + 'platform': 'Facebook', + 'date': '2019-09-07 22:06:14', + 'updated': '2019-09-08 00:30:46', + 'type': 'link', + 'title': 'Bianca Andreescu stuns Serena Williams to deny tennis legend a 24th Grand Slam title with straight-sets win in U.S. Open final', + 'caption': 'nydailynews.com', + 'description': 'It was a stunning straight-set romp.', + 'message': 'Bianca Andreescu stuns Serena Williams to deny tennis legend a 24th Grand Slam title with straight-sets win in U.S. Open final Canadian upstart Andreescu, 19, got the best of the American superstar in a convincing 6-3, 7-5 display at the U.S. 
Open.', + 'expandedLinks': [{ + 'original': 'https://www.nydailynews.com/sports/more-sports/ny-serena-williams-bianca-andreescu-us-open-final-20190907-6d5z6gyxajftdbwdw2ifgovzde-story.html', + 'expanded': 'https://www.nydailynews.com/sports/more-sports/ny-serena-williams-bianca-andreescu-us-open-final-20190907-6d5z6gyxajftdbwdw2ifgovzde-story.html' + }], + 'link': 'https://www.nydailynews.com/sports/more-sports/ny-serena-williams-bianca-andreescu-us-open-final-20190907-6d5z6gyxajftdbwdw2ifgovzde-story.html', + 'postUrl': 'https://www.facebook.com/NYDailyNews/posts/10156464339042541', + 'subscriberCount': 3119682, + 'score': 7.0894308943089435, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD4L3C7ugtRqaZf&w=720&h=720&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F0hvHjy5wJQkhjkPtFeNbb3im7GY%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2F6W6GX2O4NFD6XOIHBY7OJR5DXY.jpg&cfs=1&_nc_hash=AQCtd3fvuMA0bX4a', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBd-FBpxDYozc6-&url=https%3A%2F%2Fwww.nydailynews.com%2Fresizer%2F0hvHjy5wJQkhjkPtFeNbb3im7GY%3D%2F1200x0%2Ftop%2Farc-anglerfish-arc2-prod-tronc.s3.amazonaws.com%2Fpublic%2F6W6GX2O4NFD6XOIHBY7OJR5DXY.jpg&_nc_hash=AQCdij0z_1ihuAiP' + }], + 'statistics': { + 'actual': { + 'likeCount': 260, + 'shareCount': 184, + 'commentCount': 135, + 'loveCount': 25, + 'wowCount': 90, + 'hahaCount': 20, + 'sadCount': 151, + 'angryCount': 7, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 29, + 'shareCount': 27, + 'commentCount': 22, + 'loveCount': 4, + 'wowCount': 9, + 'hahaCount': 13, + 'sadCount': 6, + 'angryCount': 13, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 18752, + 'name': 'New York Daily News', + 'handle': 'NYDailyNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/34963357_10155516739962541_1916910854155010048_n.jpg?_nc_cat=1&_nc_oc=AQmjFK4eo-CK8fL21CSJr1btV3Al6e74byD7EyXVL8apaCEHf5ql7TW_ZRkUiYID0qY&_nc_ht=scontent.xx&oh=e33f579d2d00c6afc68a0e7cbd70b6c8&oe=5E0623E1', + 'subscriberCount': 3120017, + 'url': 'https://www.facebook.com/268914272540', + 'platform': 'Facebook', + 'platformId': '268914272540', + 'verified': True + } + }, { + 'id': 70161756319, + 'platformId': '228735667216_10156998679937217', + 'platform': 'Facebook', + 'date': '2019-09-07 20:25:55', + 'updated': '2019-09-08 00:25:08', + 'type': 'link', + 'title': 'Amber Rudd resigns from cabinet', + 'caption': 'bbc.com', + 'description': ' ', + 'message': 'UK Work and Pensions Secretary Amber Rudd resigns from the government saying she cannot stand by while loyal Conservatives are expelled.', + 'expandedLinks': [{ + 'original': 'https://bbc.in/34y74KD', + 'expanded': 'https://www.bbc.com/news/uk-politics-49623737?ns_mchannel=social&ns_campaign=bbcnews&ns_source=facebook&ocid=socialflow_facebook&fbclid=IwAR0Y5wtV7XAYMOy9za8iQR-hQ_ed0zhqD9ieNWR91SwEzNn0f9V8qZBpAZ8' + }], + 'link': 'https://bbc.in/34y74KD', + 'postUrl': 'https://www.facebook.com/bbcnews/posts/10156998679937217', + 'subscriberCount': 49392159, + 'score': 6.990017615971815, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDVrCPNmQJJR5cv&w=720&h=720&url=https%3A%2F%2Fichef.bbci.co.uk%2Fnews%2F1024%2Fbranded_news%2F7A23%2Fproduction%2F_97176213_breaking_news_bigger.png&cfs=1&_nc_hash=AQDn5vmvnE_HCobw', + 'height': 720, + 'width': 720, + 'full': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQCC5QUIIVk7Ey9w&url=https%3A%2F%2Fichef.bbci.co.uk%2Fnews%2F1024%2Fbranded_news%2F7A23%2Fproduction%2F_97176213_breaking_news_bigger.png&_nc_hash=AQDIOh8BQjgML1G9' + }], + 'statistics': { + 'actual': { + 'likeCount': 5261, + 'shareCount': 1809, + 'commentCount': 2123, + 'loveCount': 298, + 'wowCount': 724, + 'hahaCount': 1589, + 'sadCount': 57, + 'angryCount': 43, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 876, + 'shareCount': 225, + 'commentCount': 277, + 'loveCount': 45, + 'wowCount': 77, + 'hahaCount': 93, + 'sadCount': 63, + 'angryCount': 47, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 16403, + 'name': 'BBC News', + 'handle': 'bbcnews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/67191311_10156857876272217_4342089529688064000_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQk5kAdrSFMzze_w-lzADmQENwckqsjInhGPXnxTYNgxJpQ7siiGF44i0wivzxfUmPw&_nc_ht=scontent.xx&oh=5b9721d79e733db34cd496e566100993&oe=5DF5BFA1', + 'subscriberCount': 49397882, + 'url': 'https://www.facebook.com/228735667216', + 'platform': 'Facebook', + 'platformId': '228735667216', + 'verified': True + } + }, { + 'id': 70173549413, + 'platformId': '219367258105115_3290988857609591', + 'platform': 'Facebook', + 'date': '2019-09-07 23:34:33', + 'updated': '2019-09-08 00:41:04', + 'type': 'link', + 'title': 'Trump says Taliban leaders were coming to the US for a Camp David meeting but he canceled it', + 'caption': 'cnn.com', + 'description': ' ', + 'message': "BREAKING: President Trump says he canceled a secret meeting at Camp David tomorrow with Taliban leaders and Afghanistan's President.", + 'expandedLinks': [{ + 'original': 'https://cnn.it/2LwOn1r', + 'expanded': 'https://www.cnn.com/2019/09/07/politics/trump-cancels-secret-meeting-taliban-afghanistan-president/index.html?utm_content=2019-09-07T23%3A34%3A31&utm_source=fbCNNp&utm_medium=social&utm_term=link' + }], + 'link': 'https://cnn.it/2LwOn1r', + 'postUrl': 'https://www.facebook.com/cnnpolitics/posts/3290988857609591', + 'subscriberCount': 2855492, + 'score': 6.918644067796611, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAWT0uLyIBSF_0v&w=619&h=619&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190905172550-02-trump-medal-of-freedom-0905-super-tease.jpg&cfs=1&sx=184&sy=0&sw=619&sh=619&_nc_hash=AQD2RHNKQazn6ZvL', + 'height': 619, + 'width': 619, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBndecYvtod0WTA&url=https%3A%2F%2Fcdn.cnn.com%2Fcnnnext%2Fdam%2Fassets%2F190905172550-02-trump-medal-of-freedom-0905-super-tease.jpg&_nc_hash=AQC-DN0i4-h1BxQZ' + }], + 'statistics': { + 'actual': { + 'likeCount': 108, + 'shareCount': 422, + 'commentCount': 601, + 'loveCount': 3, + 'wowCount': 120, + 'hahaCount': 575, + 'sadCount': 10, + 'angryCount': 202, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 75, + 'shareCount': 40, + 'commentCount': 99, + 'loveCount': 10, + 'wowCount': 7, + 'hahaCount': 27, + 'sadCount': 7, + 'angryCount': 30, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 19471, + 'name': 'CNN Politics', + 'handle': 'cnnpolitics', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22450067_1835100979865060_6024097554775073207_n.png?_nc_cat=1&_nc_oc=AQmpWGKTrzg30Lmmy5ncZ5txlFyDirtObkp2leejFgez6t02RAflIlctecGiymX0NU8&_nc_ht=scontent.xx&oh=bbc41bdb10ef689246595025fc23b309&oe=5E070315', + 'subscriberCount': 2855693, + 'url': 'https://www.facebook.com/219367258105115', + 'platform': 'Facebook', + 
'platformId': '219367258105115', + 'verified': True + } + }, { + 'id': 70159788131, + 'platformId': '210277954204_10156789441434205', + 'platform': 'Facebook', + 'date': '2019-09-07 19:41:48', + 'updated': '2019-09-08 00:39:40', + 'type': 'youtube', + 'caption': 'youtube.com', + 'description': "Donald Trump can't handle Alexandria Ocasio-Cortez and The Squad; here is a perfect example. John Iadarola and Jayar Jackson break it down on The Damage Repo...", + 'expandedLinks': [{ + 'original': 'https://www.youtube.com/watch?v=TjVYOzfrpkc&feature=youtu.be', + 'expanded': 'https://www.youtube.com/watch?v=TjVYOzfrpkc&feature=youtu.be' + }], + 'link': 'https://www.youtube.com/watch?v=TjVYOzfrpkc&feature=youtu.be', + 'postUrl': 'https://www.facebook.com/TheYoungTurks/posts/10156789441434205', + 'subscriberCount': 2099948, + 'score': 6.682242990654205, + 'media': [{ + 'type': 'video', + 'url': 'https://www.youtube.com/embed/TjVYOzfrpkc?autoplay=1', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDcJ1B_OysiE_LK&w=720&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2FTjVYOzfrpkc%2Fmaxresdefault.jpg&cfs=1&sx=261&sy=0&sw=720&sh=720&_nc_hash=AQBiAQ0_vQwsDcOq', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBgnnPU_l3lgrgU&w=1280&h=720&url=https%3A%2F%2Fi.ytimg.com%2Fvi%2FTjVYOzfrpkc%2Fmaxresdefault.jpg&crop&sx=0&sy=0&sw=1280&sh=720&_nc_hash=AQApjZWx0m_zKiJR' + }], + 'statistics': { + 'actual': { + 'likeCount': 305, + 'shareCount': 87, + 'commentCount': 45, + 'loveCount': 56, + 'wowCount': 2, + 'hahaCount': 217, + 'sadCount': 1, + 'angryCount': 2, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 33, + 'shareCount': 17, + 'commentCount': 19, + 'loveCount': 7, + 'wowCount': 3, + 'hahaCount': 15, + 'sadCount': 4, + 'angryCount': 9, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6786, + 'name': 'The Young Turks', + 'handle': 'TheYoungTurks', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1003713_10151543513399205_523422522_n.jpg?_nc_cat=1&_nc_oc=AQnnXFBTIz-GDK79X4ZL1tWD8ZS5F3y_makkEyxpcCf_7U3QmoBvJjb9aWlpiMT8dro&_nc_ht=scontent.xx&oh=5684bdb9a01611f4ca6e9ea9dedbc57e&oe=5DF64CB5', + 'subscriberCount': 2100186, + 'url': 'https://www.facebook.com/210277954204', + 'platform': 'Facebook', + 'platformId': '210277954204', + 'verified': True + } + }, { + 'id': 70162969192, + 'platformId': '140738092630206_2613192798718044', + 'platform': 'Facebook', + 'date': '2019-09-07 20:50:02', + 'updated': '2019-09-08 00:19:09', + 'type': 'link', + 'title': 'Bystanders taunted and laughed as police officers were being fired upon in Philadelphia', + 'caption': 'theblaze.com', + 'description': '"A major moment of disappointment..."', + 'message': 'Just sad.', + 'expandedLinks': [{ + 'original': 'https://bit.ly/2A1JS9y', + 'expanded': 'https://www.theblaze.com/news/philadelphia-bystanders-mocked-police-during-shooting?utm_content=buffer63598&utm_medium=organic&utm_source=facebook&utm_campaign=fb-theblaze' + }], + 'link': 'https://bit.ly/2A1JS9y', + 'postUrl': 'https://www.facebook.com/TheBlaze/posts/2613192798718044', + 'subscriberCount': 2089159, + 'score': 6.564102564102564, + 'media': [{ + 'type': 'photo', + 'url': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQB1eQyjd6fQFb9f&w=720&h=720&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F20567472%252F1200x600.jpg%2F2000%252C2000%2FDU%252BXWLoZnuoOJS6M%2Fimg.jpg&cfs=1&sx=460&sy=0&sw=1000&sh=1000&_nc_hash=AQDBlQqCoMv6kpnn', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAaCa9qpoHlPzOt&url=https%3A%2F%2Ftheblaze-img.rbl.ms%2Fsimage%2Fhttps%253A%252F%252Fassets.rbl.ms%252F20567472%252F1200x600.jpg%2F2000%252C2000%2FDU%252BXWLoZnuoOJS6M%2Fimg.jpg&_nc_hash=AQB6ITZyK5kS6QhT' + }], + 'statistics': { + 'actual': { + 'likeCount': 153, + 'shareCount': 675, + 'commentCount': 245, + 'loveCount': 2, + 'wowCount': 31, + 'hahaCount': 9, + 'sadCount': 255, + 'angryCount': 1190, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 71, + 'shareCount': 71, + 'commentCount': 99, + 'loveCount': 5, + 'wowCount': 18, + 'hahaCount': 46, + 'sadCount': 9, + 'angryCount': 71, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6892, + 'name': 'TheBlaze', + 'handle': 'TheBlaze', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/47350623_2141870595850269_7864140219111440384_n.png?_nc_cat=1&_nc_oc=AQmGyVQswjmmaInAkgMKbLJ62jAcb2BShbL78435-MqCEBLedhKr7VO97Nzxt2x220k&_nc_ht=scontent.xx&oh=4a5ce0b44b6400aab9bb78aa2afdee87&oe=5E011864', + 'subscriberCount': 2089166, + 'url': 'https://www.facebook.com/140738092630206', + 'platform': 'Facebook', + 'platformId': '140738092630206', + 'verified': True + } + }, { + 'id': 70175231621, + 'platformId': '167115176655082_2995005573866014', + 'platform': 'Facebook', + 'date': '2019-09-08 00:00:02', + 'updated': '2019-09-08 00:25:19', + 'type': 'link', + 'title': "Experts Want to Give Control of America's Nuclear Missiles to AI", + 'caption': 'vice.com', + 'description': 'If America is attacked with a nuclear bomb, artificial intelligence would automatically fire back even if we are all dead.', + 'message': 'This is a terrible idea.', + 'expandedLinks': [{ + 'original': 'https://www.vice.com/en_us/article/59n3y5/experts-want-to-give-control-of-americas-nuclear-missiles-to-ai?utm_source=vicefbus', + 'expanded': 'https://www.vice.com/en_us/article/59n3y5/experts-want-to-give-control-of-americas-nuclear-missiles-to-ai?utm_source=vicefbus' + }], + 'link': 'https://www.vice.com/en_us/article/59n3y5/experts-want-to-give-control-of-americas-nuclear-missiles-to-ai?utm_source=vicefbus', + 'postUrl': 'https://www.facebook.com/VICE/posts/2995005573866014', + 'subscriberCount': 8174144, + 'score': 6.5423728813559325, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQApMNUEyhs8JYQj&w=720&h=720&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d6ea91f390935000a9a7623%2Flede%2F1567533843082-GettyImages-525450811.jpeg%3Fcrop%3D1xw%3A0.999873031995937xh%3Bcenter%2Ccenter%26resize%3D1200%3A%2A&cfs=1&_nc_hash=AQANsmy8nKq7WOUE', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCAk4P30RJ3YPGZ&url=https%3A%2F%2Fvideo-images.vice.com%2Farticles%2F5d6ea91f390935000a9a7623%2Flede%2F1567533843082-GettyImages-525450811.jpeg%3Fcrop%3D1xw%3A0.999873031995937xh%3Bcenter%2Ccenter%26resize%3D1200%3A%2A&_nc_hash=AQBolN324-Kx3RDV' + }], + 'statistics': { + 'actual': { + 'likeCount': 47, + 'shareCount': 93, + 'commentCount': 119, + 'loveCount': 4, + 'wowCount': 14, + 'hahaCount': 55, + 'sadCount': 4, + 'angryCount': 50, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 18, + 
'shareCount': 8, + 'commentCount': 13, + 'loveCount': 4, + 'wowCount': 4, + 'hahaCount': 7, + 'sadCount': 2, + 'angryCount': 3, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6646, + 'name': 'VICE', + 'handle': 'VICE', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13427861_1304295039603751_2178102892370936049_n.jpg?_nc_cat=1&_nc_oc=AQmzoEUjC5BCCMVSsIFvWa52KGr3Iqh9f0Y_eezqYMFw7h_EUam7WQdYxEFvJB6LoP0&_nc_ht=scontent.xx&oh=847f8eb6c5132c90382bc0940afbc692&oe=5E02C5BA', + 'subscriberCount': 8177544, + 'url': 'https://www.facebook.com/167115176655082', + 'platform': 'Facebook', + 'platformId': '167115176655082', + 'verified': True + } + }, { + 'id': 70157699253, + 'platformId': '15704546335_10158452982671336', + 'platform': 'Facebook', + 'date': '2019-09-07 19:00:06', + 'updated': '2019-09-08 00:36:22', + 'type': 'link', + 'title': "University of Tennessee turns bullied elementary school student's t-shirt design into official apparel", + 'caption': 'foxnews.com', + 'description': 'The young University of Tennessee fan that was bullied last week for the homemade shirt he wore to his school’s collegiate day was shown an outpouring of support that has since inspired the university to make his design into an official piece of apparel.', + 'message': 'A University of Tennessee fan that was bullied for his homemade shirt has inspired the university to make his design into an official piece of apparel.', + 'expandedLinks': [{ + 'original': 'https://www.foxnews.com/us/university-of-tennessee-bullied-student-t-shirt-design', + 'expanded': 'https://www.foxnews.com/us/university-of-tennessee-bullied-student-t-shirt-design' + }], + 'link': 'https://www.foxnews.com/us/university-of-tennessee-bullied-student-t-shirt-design', + 'postUrl': 'https://www.facebook.com/FoxNews/posts/10158452982671336', + 'subscriberCount': 17162352, + 'score': 6.431667403803627, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBK4qdC7-IEHKlP&w=720&h=720&url=https%3A%2F%2Fstatic.foxnews.com%2Ffoxnews.com%2Fcontent%2Fuploads%2F2019%2F09%2FVOLS.jpg&cfs=1&_nc_hash=AQABy0BtCZZ24GnU', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCfDxKseiGNUbrW&url=https%3A%2F%2Fstatic.foxnews.com%2Ffoxnews.com%2Fcontent%2Fuploads%2F2019%2F09%2FVOLS.jpg&_nc_hash=AQAW52maPsiatQPC' + }], + 'statistics': { + 'actual': { + 'likeCount': 8315, + 'shareCount': 2003, + 'commentCount': 552, + 'loveCount': 3573, + 'wowCount': 43, + 'hahaCount': 48, + 'sadCount': 4, + 'angryCount': 4, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 730, + 'shareCount': 391, + 'commentCount': 635, + 'loveCount': 69, + 'wowCount': 107, + 'hahaCount': 130, + 'sadCount': 52, + 'angryCount': 147, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6897, + 'name': 'Fox News', + 'handle': 'FoxNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22519337_10156158270486336_6810712156586627746_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlXNhWwb8bfCyDXwZo8N1dsslewpEwDTilUVrDkK4ie4qoq_SHj__a9Ws-O0Hsa97M&_nc_ht=scontent.xx&oh=485819e2e49151fcf033722359d3e1a7&oe=5DFF0F55', + 'subscriberCount': 17163279, + 'url': 'https://www.facebook.com/15704546335', + 'platform': 'Facebook', + 'platformId': '15704546335', + 'verified': True + } + }, { + 'id': 70175836725, + 'platformId': '475549362567960_2304081899714688', + 'platform': 'Facebook', + 'date': '2019-09-08 00:00:13', + 'updated': '2019-09-08 00:40:20', + 'type': 'native_video', + 'description': 'Socialist 
Rep. Alexandria Ocasio-Cortez (D-N.Y.) gets a hard lesson on free speech from a FED-UP Trump supporter.\n\n"You\'re bringing incitement and hate speech together, which is totally absurd and fascist!" 🔥🔥🔥', + 'message': '"You\'re bringing incitement and hate speech together, which is totally absurd and fascist!" 🔥🔥🔥', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/RantNationBlazeTV/videos/464956854293046/', + 'expanded': 'https://www.facebook.com/RantNationBlazeTV/videos/464956854293046/' + }], + 'link': 'https://www.facebook.com/RantNationBlazeTV/videos/464956854293046/', + 'postUrl': 'https://www.facebook.com/ConservativeReview/posts/2304081899714688', + 'subscriberCount': 1534190, + 'score': 6.382978723404255, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/61970984_386455858745544_3407560622387232768_n.mp4?_nc_cat=100&efg=eyJybHIiOjM4OCwicmxhIjoxMjU0LCJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_oc=AQn_aFAILPRkh2IRUK8n3eD_2E-dd1HgkPtOeVszpkraWTkobKQ6qsDQtgjy6Fb5KdI&rl=388&vabr=216&_nc_ht=video.xx&oh=1394caeb743e2e2e01dd18b9fd97e2bf&oe=5D768C8B', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/s720x720/60887318_389396874998383_6277103585096368128_n.jpg?_nc_cat=107&_nc_oc=AQmaErj0jsp9aj0ykhWYX6QQUirRB-KbJhG71aByptbfEBbJ2wtnzwWBVljlckXSlMY&_nc_ht=scontent.xx&oh=827d6e3a1fe6a8feef392c2bdfbee2bf&oe=5E11CCF4', + 'height': 405, + 'width': 720, + 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/60887318_389396874998383_6277103585096368128_n.jpg?_nc_cat=107&_nc_oc=AQmaErj0jsp9aj0ykhWYX6QQUirRB-KbJhG71aByptbfEBbJ2wtnzwWBVljlckXSlMY&_nc_ht=scontent.xx&oh=e6c5f31f225cfa18771e516e829b686f&oe=5E0AB3BC' + }], + 'statistics': { + 'actual': { + 'likeCount': 329, + 'shareCount': 93, + 'commentCount': 62, + 'loveCount': 43, + 'wowCount': 4, + 'hahaCount': 66, + 'sadCount': 0, + 'angryCount': 3, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 44, + 'shareCount': 17, + 'commentCount': 9, + 'loveCount': 5, + 'wowCount': 4, + 'hahaCount': 4, + 'sadCount': 3, + 'angryCount': 8, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 400323, + 'name': 'Conservative Review', + 'handle': 'ConservativeReview', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/13511965_914218092034416_3120935637616696621_n.png?_nc_cat=1&_nc_oc=AQmwEB1vd_Ss605EjbSWDYSwe1KWV44N7RpDVd5HB--hRPK-_AmZmVSl498baoafRac&_nc_ht=scontent.xx&oh=1ea31cc2116c322669c75f2180fc2684&oe=5E01B826', + 'subscriberCount': 1534190, + 'url': 'https://www.facebook.com/475549362567960', + 'platform': 'Facebook', + 'platformId': '475549362567960', + 'verified': True + }, + 'videoLengthMS': 309779 + }, { + 'id': 70158011942, + 'platformId': '86680728811_10158783168978812', + 'platform': 'Facebook', + 'date': '2019-09-07 19:03:29', + 'updated': '2019-09-08 00:45:25', + 'type': 'native_video', + 'message': "More than a thousand Bahamians evacuated by the Grand Celebration cruise ship arrived in Palm beach, Florida, after it dropped more than 112 tons of supplies in Freeport, Grand Bahama in the wake of Hurricane Dorian's catastrophic damage to the Bahamas. 
https://abcn.ws/34FavPY", + 'expandedLinks': [{ + 'original': 'https://abcn.ws/34FavPY', + 'expanded': 'https://abcnews.go.com/International/images-reveal-devastating-effects-hurricane-dorian-bahamas/story?id=65430705' + }, { + 'original': 'https://www.facebook.com/ABCNews/videos/813554229059535/', + 'expanded': 'https://www.facebook.com/ABCNews/videos/813554229059535/' + }], + 'link': 'https://www.facebook.com/ABCNews/videos/813554229059535/', + 'postUrl': 'https://www.facebook.com/ABCNews/posts/10158783168978812', + 'subscriberCount': 14195962, + 'score': 6.30638852672751, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/70819311_877173089317895_5183022024343158784_n.mp4?_nc_cat=106&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQle7QTGkM8MnwJnEwTCnpIZwxY3Ruf6nhgWjKGydds6rYEyE7prcVjih77vpSsTMwk&_nc_ht=video.xx&oh=26d059466ad9324be438eca5971408ef&oe=5D75A7BF', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67596431_813554819059476_4503891397383815168_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQl4gV72rTeimEX1BaVB5cz-y64moZRDErM1FnmHeyVS1eCNtOZvSyLCgfFH-isp_fI&_nc_ht=scontent.xx&oh=88bd30dabcf261b63a9f9a7b06ed1e58&oe=5E0E1C73', + 'height': 720, + 'width': 720, + 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/67596431_813554819059476_4503891397383815168_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQl4gV72rTeimEX1BaVB5cz-y64moZRDErM1FnmHeyVS1eCNtOZvSyLCgfFH-isp_fI&_nc_ht=scontent.xx&oh=86a02d83d45ba512d2b117ac49cc277a&oe=5DF45C28' + }], + 'statistics': { + 'actual': { + 'likeCount': 2654, + 'shareCount': 871, + 'commentCount': 300, + 'loveCount': 880, + 'wowCount': 107, + 'hahaCount': 1, + 'sadCount': 23, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 247, + 'shareCount': 212, + 'commentCount': 103, + 'loveCount': 17, + 'wowCount': 104, + 'hahaCount': 8, + 'sadCount': 72, + 'angryCount': 4, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13878, + 'name': 'ABC News', + 'handle': 'ABCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02', + 'subscriberCount': 14196629, + 'url': 'https://www.facebook.com/86680728811', + 'platform': 'Facebook', + 'platformId': '86680728811', + 'verified': True + }, + 'videoLengthMS': 38761 + }, { + 'id': 70170110679, + 'platformId': '809836039361670_970006750011264', + 'platform': 'Facebook', + 'date': '2019-09-07 22:55:45', + 'updated': '2019-09-08 00:27:14', + 'type': 'photo', + 'message': 'Divided government, North Dakota style. 
Congratulations Governor Doug Burgum on the Bison win!', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/RepArmstrongND/photos/a.811676579177616/970004493344823/?type=3', + 'expanded': 'https://www.facebook.com/RepArmstrongND/photos/a.811676579177616/970004493344823/?type=3' + }], + 'link': 'https://www.facebook.com/RepArmstrongND/photos/a.811676579177616/970004493344823/?type=3', + 'postUrl': 'https://www.facebook.com/RepArmstrongND/posts/970006750011264', + 'subscriberCount': 2971, + 'score': 6.275862068965517, + 'media': [{ + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69756298_970004496678156_6907594845594845184_n.jpg?_nc_cat=102&_nc_oc=AQkJ7Ki9PssoDAIvRWj_h-45qslAaM7D9V8I9ivdG17oVKUlDRM6Z94TXRMxFKHZNHM&_nc_ht=scontent.xx&oh=ee7139bf36eb3b3270a2cc6cf27cc1f6&oe=5DFA7146', + 'height': 720, + 'width': 540, + 'full': 'https://scontent.xx.fbcdn.net/v/t1.0-9/s720x720/69756298_970004496678156_6907594845594845184_n.jpg?_nc_cat=102&_nc_oc=AQkJ7Ki9PssoDAIvRWj_h-45qslAaM7D9V8I9ivdG17oVKUlDRM6Z94TXRMxFKHZNHM&_nc_ht=scontent.xx&oh=ee7139bf36eb3b3270a2cc6cf27cc1f6&oe=5DFA7146' + }], + 'statistics': { + 'actual': { + 'likeCount': 149, + 'shareCount': 3, + 'commentCount': 9, + 'loveCount': 10, + 'wowCount': 0, + 'hahaCount': 11, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 15, + 'shareCount': 2, + 'commentCount': 3, + 'loveCount': 2, + 'wowCount': 4, + 'hahaCount': 1, + 'sadCount': 0, + 'angryCount': 2, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 5599817, + 'name': 'Congressman Kelly Armstrong', + 'handle': 'RepArmstrongND', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49948250_810468759298398_5015266941133127680_n.jpg?_nc_cat=108&_nc_oc=AQnxIy4F6VXgzgmGwJSNyp_LXl4eUeIRidDRPiwritTvefyha72Fn3uQZ5Ig3w8CUgE&_nc_ht=scontent.xx&oh=1a6d324e012fcd21703991138eef4fd0&oe=5DFFD242', + 'subscriberCount': 2985, + 'url': 'https://www.facebook.com/809836039361670', + 'platform': 'Facebook', + 'platformId': '809836039361670', + 'verified': True + } + }, { + 'id': 70174684045, + 'platformId': '86680728811_10158783960173812', + 'platform': 'Facebook', + 'date': '2019-09-07 23:56:22', + 'updated': '2019-09-08 00:45:25', + 'type': 'link', + 'title': 'Trump calls off secret meeting with Taliban, Afghan leaders', + 'caption': 'abcnews.go.com', + 'description': ' ', + 'message': 'Pres. 
Donald Trump calls off a secret Camp David meeting with Taliban and Afghanistan leaders.', + 'expandedLinks': [{ + 'original': 'https://abcn.ws/2LxGetr', + 'expanded': 'https://abcnews.go.com/Politics/wireStory/trump-calls-off-secret-meeting-taliban-afghan-leaders-65458544?cid=social_fb_abcn' + }], + 'link': 'https://abcn.ws/2LxGetr', + 'postUrl': 'https://www.facebook.com/ABCNews/posts/10158783960173812', + 'subscriberCount': 14195962, + 'score': 6.206896551724138, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDQCdBbCnf161B-&w=720&h=720&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3316-1217482569dd6b4ec4429254b58a2a06.jpg&cfs=1&_nc_hash=AQBWsanQJrG_OzeB', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCdbu41auqfYy4v&url=https%3A%2F%2Fs3.amazonaws.com%2Fprod-cust-photo-posts-jfaikqealaka%2F3316-1217482569dd6b4ec4429254b58a2a06.jpg&_nc_hash=AQDv6mnngddET3gP' + }], + 'statistics': { + 'actual': { + 'likeCount': 237, + 'shareCount': 199, + 'commentCount': 529, + 'loveCount': 12, + 'wowCount': 99, + 'hahaCount': 477, + 'sadCount': 11, + 'angryCount': 56, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 106, + 'shareCount': 49, + 'commentCount': 46, + 'loveCount': 13, + 'wowCount': 14, + 'hahaCount': 12, + 'sadCount': 13, + 'angryCount': 8, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13878, + 'name': 'ABC News', + 'handle': 'ABCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02', + 'subscriberCount': 14196629, + 'url': 'https://www.facebook.com/86680728811', + 'platform': 'Facebook', + 'platformId': '86680728811', + 'verified': True + } + }, { + 'id': 70170406994, + 'platformId': '182919686769_10156515199261770', + 'platform': 'Facebook', + 'date': '2019-09-07 23:00:15', + 'updated': '2019-09-08 00:40:44', + 'type': 'link', + 'title': 'Trump Tweets Praise From Border Patrol Union President After Mexico Announces Massive Drop In Migrant Arrivals', + 'caption': 'dailycaller.com', + 'description': 'Donald Trump tweeted a quote Saturday morning from National Border Patrol Council president Brandon Judd lauding Mexico as a "true Border Security Partner."', + 'message': 'Yuge!', + 'expandedLinks': [{ + 'original': 'https://dailycaller.com/2019/09/07/trump-mexico-drop-migrant-border/', + 'expanded': 'https://dailycaller.com/2019/09/07/trump-mexico-drop-migrant-border/' + }], + 'link': 'https://dailycaller.com/2019/09/07/trump-mexico-drop-migrant-border/', + 'postUrl': 'https://www.facebook.com/DailyCaller/posts/10156515199261770', + 'subscriberCount': 5408428, + 'score': 6.142118863049095, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDv-0rr39_tiNQG&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dd29c6f0a001ba5b1f43%2Fd6b21b289a461b60f8abcdf7f7f4df99ce425025_df2ee53b3e8f0b1976783e6fc45fe7ddf70c493d_facebook&cfs=1&_nc_hash=AQAr645fhuWZwd4v', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQA4QIFRa_STKnzo&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73dd29c6f0a001ba5b1f43%2Fd6b21b289a461b60f8abcdf7f7f4df99ce425025_df2ee53b3e8f0b1976783e6fc45fe7ddf70c493d_facebook&_nc_hash=AQCXE7dGq6zEOF_J' + }], + 
'statistics': { + 'actual': { + 'likeCount': 1868, + 'shareCount': 245, + 'commentCount': 92, + 'loveCount': 161, + 'wowCount': 4, + 'hahaCount': 4, + 'sadCount': 1, + 'angryCount': 2, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 112, + 'shareCount': 54, + 'commentCount': 108, + 'loveCount': 8, + 'wowCount': 15, + 'hahaCount': 47, + 'sadCount': 7, + 'angryCount': 36, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13489, + 'name': 'The Daily Caller', + 'handle': 'DailyCaller', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471', + 'subscriberCount': 5408115, + 'url': 'https://www.facebook.com/182919686769', + 'platform': 'Facebook', + 'platformId': '182919686769', + 'verified': True + } + }, { + 'id': 70168125843, + 'platformId': '86680728811_10158783746343812', + 'platform': 'Facebook', + 'date': '2019-09-07 22:24:58', + 'updated': '2019-09-08 00:45:25', + 'type': 'link', + 'title': 'Canadian Bianca Andreescu beats Serena Williams to win the U.S. Open', + 'caption': 'abcnews.go.com', + 'description': ' ', + 'message': "Canadian Bianca Andreescu beat U.S. tennis star Serena Williams in straight sets to win the U.S. Open womens' finals on Saturday.", + 'expandedLinks': [{ + 'original': 'https://abcn.ws/2LsasxS', + 'expanded': 'https://abcnews.go.com/GMA/Culture/canadian-bianca-andreescu-beats-serena-williams-win-us/story?id=64291644&cid=social_fb_abcn' + }], + 'link': 'https://abcn.ws/2LsasxS', + 'postUrl': 'https://www.facebook.com/ABCNews/posts/10158783746343812', + 'subscriberCount': 14195962, + 'score': 6.134556574923548, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCSeL5RfwH3NRrE&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fserena-04-as-epa-190908_hpMain_16x9_992.jpg&cfs=1&sx=334&sy=0&sw=558&sh=558&_nc_hash=AQC2o8CMuGiSX_Ji', + 'height': 558, + 'width': 558, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDhKzi-HhJI-7m8&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fserena-04-as-epa-190908_hpMain_16x9_992.jpg&_nc_hash=AQDwy9HVRXiDJ_EN' + }], + 'statistics': { + 'actual': { + 'likeCount': 868, + 'shareCount': 213, + 'commentCount': 244, + 'loveCount': 114, + 'wowCount': 229, + 'hahaCount': 58, + 'sadCount': 277, + 'angryCount': 3, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 133, + 'shareCount': 63, + 'commentCount': 57, + 'loveCount': 16, + 'wowCount': 17, + 'hahaCount': 15, + 'sadCount': 16, + 'angryCount': 10, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13878, + 'name': 'ABC News', + 'handle': 'ABCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02', + 'subscriberCount': 14196629, + 'url': 'https://www.facebook.com/86680728811', + 'platform': 'Facebook', + 'platformId': '86680728811', + 'verified': True + } + }, { + 'id': 70161254809, + 'platformId': '123624513983_10157817449458984', + 'platform': 'Facebook', + 'date': '2019-09-07 20:15:00', + 'updated': '2019-09-08 00:38:24', + 'type': 'photo', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/WesternJournal/photos/a.10150384918003984/10157817449183984/?type=3', 
+ 'expanded': 'https://www.facebook.com/WesternJournal/photos/a.10150384918003984/10157817449183984/?type=3' + }], + 'link': 'https://www.facebook.com/WesternJournal/photos/a.10150384918003984/10157817449183984/?type=3', + 'postUrl': 'https://www.facebook.com/WesternJournal/posts/10157817449458984', + 'subscriberCount': 5185113, + 'score': 6.1133023975251355, + 'media': [{ + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69919332_10157817449193984_4486464700724281344_n.jpg?_nc_cat=111&_nc_oc=AQliMBxdazxx-1thEwmLC_H1WPeTy2OADXEU6yi2-3sizvlCtytAR-mlCvsY6YrfGhc&_nc_ht=scontent.xx&oh=94ddf879177de28b11fbd50ea2839715&oe=5E01E4FD', + 'height': 720, + 'width': 720, + 'full': 'https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69919332_10157817449193984_4486464700724281344_n.jpg?_nc_cat=111&_nc_oc=AQliMBxdazxx-1thEwmLC_H1WPeTy2OADXEU6yi2-3sizvlCtytAR-mlCvsY6YrfGhc&_nc_ht=scontent.xx&oh=94ddf879177de28b11fbd50ea2839715&oe=5E01E4FD' + }], + 'statistics': { + 'actual': { + 'likeCount': 18299, + 'shareCount': 10102, + 'commentCount': 968, + 'loveCount': 2038, + 'wowCount': 43, + 'hahaCount': 134, + 'sadCount': 12, + 'angryCount': 22, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 3703, + 'shareCount': 902, + 'commentCount': 270, + 'loveCount': 183, + 'wowCount': 19, + 'hahaCount': 47, + 'sadCount': 16, + 'angryCount': 32, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 93420, + 'name': 'The Western Journal', + 'handle': 'WesternJournal', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49664345_10157205261148984_1256195277434388480_n.png?_nc_cat=1&_nc_oc=AQkUo1GJrlGqxXcfjFxGkOcXookw_tgn8qATXCSI0ICt6sibuBdTtyIuuWj9iPLw5ZM&_nc_ht=scontent.xx&oh=bf010f921f678fbb0032a465900b5f24&oe=5DF8F16D', + 'subscriberCount': 5184899, + 'url': 'https://www.facebook.com/123624513983', + 'platform': 'Facebook', + 'platformId': '123624513983', + 'verified': True + } + }, { + 'id': 70160858713, + 'platformId': '114945745226947_2335472573174242', + 'platform': 'Facebook', + 'date': '2019-09-07 20:00:51', + 'updated': '2019-09-08 00:40:02', + 'type': 'link', + 'title': 'Celebrity cruise ship reroutes in Bahamas to deliver food, aid after Dorian', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': "The Celebrity Equinox's kitchen staff is preparing 10,000 meals with guests even pitching in to help in the aftermath of the hurricane.", + 'expandedLinks': [{ + 'original': 'https://on.msnbc.com/2LxhrFT', + 'expanded': 'https://www.nbcnews.com/news/world/after-hurricane-dorian-celebrity-cruise-ship-bahamas-reroutes-deliver-food-n1050796?cid=sm_npd_ms_fb_lw' + }], + 'link': 'https://on.msnbc.com/2LxhrFT', + 'postUrl': 'https://www.facebook.com/thelastword/posts/2335472573174242', + 'subscriberCount': 515865, + 'score': 6.0418604651162795, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCl0TaY2DIEK806&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2999941%2F190906-celebrity-equinox-hurricane-relief-al-1053jpg_692e60a6dbb531606db72afafb2b2440.nbcnews-fp-1200-630.jpg&cfs=1&sx=570&sy=0&sw=630&sh=630&_nc_hash=AQAItXWulq2vZygr', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBNaHxGrar-t0VK&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2999941%2F190906-celebrity-equinox-hurricane-relief-al-1053jpg_692e60a6dbb531606db72afafb2b2440.nbcnews-fp-1200-630.jpg&_nc_hash=AQDJ5XFXZ3aZb6GG' + }], + 'statistics': { + 'actual': { + 'likeCount': 
768, + 'shareCount': 234, + 'commentCount': 26, + 'loveCount': 266, + 'wowCount': 3, + 'hahaCount': 1, + 'sadCount': 1, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 43, + 'shareCount': 34, + 'commentCount': 42, + 'loveCount': 8, + 'wowCount': 11, + 'hahaCount': 8, + 'sadCount': 13, + 'angryCount': 56, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 4004, + 'name': "The Last Word With Lawrence O'Donnell", + 'handle': 'thelastword', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/16114622_1184240434964134_5160717321521180833_n.png?_nc_cat=1&_nc_oc=AQkE59Us5gvqt0N90qJZW6XSCRHGK5YGgwcB-G1YctCjO7mmMEWXfrnnaX-jZYV633o&_nc_ht=scontent.xx&oh=eaa0c18d2823fe813960f06f60585643&oe=5E08F8C6', + 'subscriberCount': 515865, + 'url': 'https://www.facebook.com/114945745226947', + 'platform': 'Facebook', + 'platformId': '114945745226947', + 'verified': True + } + }, { + 'id': 70161348039, + 'platformId': '10513336322_10158174166611323', + 'platform': 'Facebook', + 'date': '2019-09-07 20:15:07', + 'updated': '2019-09-08 00:42:26', + 'type': 'link', + 'title': 'Tories extend lead over Labour to 10% despite chaotic week', + 'caption': 'theguardian.com', + 'description': 'More than half of all leave voters are now planning to vote for Boris Johnson', + 'expandedLinks': [{ + 'original': 'https://www.theguardian.com/politics/2019/sep/07/tories-extend-lead-over-labour-to-10-despite-chaotic-week?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567874494', + 'expanded': 'https://www.theguardian.com/politics/2019/sep/07/tories-extend-lead-over-labour-to-10-despite-chaotic-week?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567874494' + }], + 'link': 'https://www.theguardian.com/politics/2019/sep/07/tories-extend-lead-over-labour-to-10-despite-chaotic-week?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567874494', + 'postUrl': 'https://www.facebook.com/theguardian/posts/10158174166611323', + 'subscriberCount': 8186083, + 'score': 6.033670033670034, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBHdBpCuwOKnssn&w=630&h=630&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F90f73b5d84cf6cd9995aa9f3b96b7f35d81327fa%2F0_63_4000_2400%2Fmaster%2F4000.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3D5d518132e7ce9c21438063fc255d64c9&cfs=1&sx=423&sy=0&sw=630&sh=630&_nc_hash=AQA2aANjQU1hmNc7', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAUfzEgAwSgTSmh&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F90f73b5d84cf6cd9995aa9f3b96b7f35d81327fa%2F0_63_4000_2400%2Fmaster%2F4000.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3D5d518132e7ce9c21438063fc255d64c9&_nc_hash=AQDlZVsz9TV4cIMo' + }], + 'statistics': { + 'actual': { + 'likeCount': 281, + 'shareCount': 206, + 'commentCount': 582, + 'loveCount': 24, + 'wowCount': 122, + 'hahaCount': 225, + 'sadCount': 230, + 'angryCount': 122, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 123, + 'shareCount': 46, + 'commentCount': 60, + 'loveCount': 11, + 'wowCount': 9, + 'hahaCount': 26, + 'sadCount': 7, + 'angryCount': 15, + 'thankfulCount': 0 + } + }, + 'account': { + 
'id': 5740, + 'name': 'The Guardian', + 'handle': 'theguardian', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/46160148_10157340584076323_3990431626264838144_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkKD6tb0oraHl_Qq9dA1S51ktyWhE9lPo7udOrFCRkfCctJldfDrwPVn7PcSDSY5Sc&_nc_ht=scontent.xx&oh=8c51a127f7d06b002a6fcba57abe5181&oe=5DFDE22E', + 'subscriberCount': 8186263, + 'url': 'https://www.facebook.com/10513336322', + 'platform': 'Facebook', + 'platformId': '10513336322', + 'verified': True + } + }, { + 'id': 70162846697, + 'platformId': '86680728811_10158783468813812', + 'platform': 'Facebook', + 'date': '2019-09-07 20:48:11', + 'updated': '2019-09-08 00:45:25', + 'type': 'link', + 'title': 'Duchess Meghan supports pal Serena Williams at US Open final', + 'caption': 'abcnews.go.com', + 'description': ' ', + 'message': "Serena Williams' close friend Meghan, the Duchess of Sussex, took a last-minute flight from London to New York to watch Williams play against 19-year-old Canadian Bianca Andreescu.", + 'expandedLinks': [{ + 'original': 'https://abcn.ws/2Lx2p2T', + 'expanded': 'https://abcnews.go.com/GMA/Culture/duchess-meghan-supports-pal-serena-williams-us-open/story?id=65428970&cid=social_fb_abcn&fbclid=IwAR0nlM2hr7NUPWnuk_WV1XcUS_IYX3FiPCRl2WJ2RzFS2htHqdSUywFN9no' + }], + 'link': 'https://abcn.ws/2Lx2p2T', + 'postUrl': 'https://www.facebook.com/ABCNews/posts/10158783468813812', + 'subscriberCount': 14195962, + 'score': 6.016528925619835, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBa1SjmHvfScQgZ&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fmeghan-markle-01-as-usa-190908_hpMain_16x9_992.jpg&cfs=1&sx=12&sy=0&sw=558&sh=558&_nc_hash=AQBCA2r4xAUn3lnA', + 'height': 558, + 'width': 558, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQApcYviDYU_iBlu&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FUS%2Fmeghan-markle-01-as-usa-190908_hpMain_16x9_992.jpg&_nc_hash=AQCbSSXvjqOh_moq' + }], + 'statistics': { + 'actual': { + 'likeCount': 1618, + 'shareCount': 76, + 'commentCount': 160, + 'loveCount': 269, + 'wowCount': 8, + 'hahaCount': 32, + 'sadCount': 3, + 'angryCount': 18, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 148, + 'shareCount': 71, + 'commentCount': 63, + 'loveCount': 17, + 'wowCount': 19, + 'hahaCount': 17, + 'sadCount': 17, + 'angryCount': 11, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13878, + 'name': 'ABC News', + 'handle': 'ABCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02', + 'subscriberCount': 14196629, + 'url': 'https://www.facebook.com/86680728811', + 'platform': 'Facebook', + 'platformId': '86680728811', + 'verified': True + } + }, { + 'id': 70163621987, + 'platformId': '908009612563863_2986539611377509', + 'platform': 'Facebook', + 'date': '2019-09-07 21:00:24', + 'updated': '2019-09-08 00:48:09', + 'type': 'native_video', + 'message': "Watch President Trump try to convince his supporters that he's 'competent'", + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/NowThisPolitics/videos/733410037104996/', + 'expanded': 'https://www.facebook.com/NowThisPolitics/videos/733410037104996/' + }], + 'link': 'https://www.facebook.com/NowThisPolitics/videos/733410037104996/', + 'postUrl': 
'https://www.facebook.com/NowThisPolitics/posts/2986539611377509', + 'subscriberCount': 6074083, + 'score': 6.015006821282401, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/41228504_471356220011696_2218706403474800640_n.mp4?_nc_cat=105&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQnv4wN5MIFBZEQnQlm8UqthX3urGg9G4rxVLRSLXw5PTzatvEX8YB2-kbDQNu597IA&_nc_ht=video.xx&oh=74fb95761cacc344c6a6ea6430e8e10e&oe=5D76A240', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/38959840_531215240657811_1501424857630375936_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQnpQThDDK9CszIVBfogQrEoXnsBquG5sVegZ5s03mA9VBnPSa1eQmmcSBz90QoX8M4&_nc_ht=scontent.xx&oh=b886a06dd6bcb662132ed7bc4248cf19&oe=5E0D62F9', + 'height': 720, + 'width': 720, + 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/38959840_531215240657811_1501424857630375936_n.jpg?_nc_cat=110&_nc_log=1&_nc_oc=AQnpQThDDK9CszIVBfogQrEoXnsBquG5sVegZ5s03mA9VBnPSa1eQmmcSBz90QoX8M4&_nc_ht=scontent.xx&oh=c2d30b81ca63fc2e70d5b4d25405c419&oe=5E0DD6A2' + }], + 'statistics': { + 'actual': { + 'likeCount': 277, + 'shareCount': 481, + 'commentCount': 1522, + 'loveCount': 27, + 'wowCount': 60, + 'hahaCount': 1583, + 'sadCount': 75, + 'angryCount': 384, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 266, + 'shareCount': 218, + 'commentCount': 111, + 'loveCount': 29, + 'wowCount': 22, + 'hahaCount': 27, + 'sadCount': 31, + 'angryCount': 29, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 311636, + 'name': 'NowThis Politics', + 'handle': 'NowThisPolitics', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/28276603_1939096412788506_2850422809072819205_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlBSULvu9xr5smvB3kmRub5MfL3SpyPxNX94GEyc5skmb19swOR40nthDv1Kip3kcw&_nc_ht=scontent.xx&oh=b734d3faa39291c805198e3ad7de3450&oe=5DFF0890', + 'subscriberCount': 6074746, + 'url': 'https://www.facebook.com/908009612563863', + 'platform': 'Facebook', + 'platformId': '908009612563863', + 'verified': True + }, + 'videoLengthMS': 154281 + }, { + 'id': 70163173554, + 'platformId': '21898300328_10159284952995329', + 'platform': 'Facebook', + 'date': '2019-09-07 20:55:00', + 'updated': '2019-09-08 00:35:29', + 'type': 'native_video', + 'message': "I'm obsessed with marshmallows ✨", + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/BuzzFeed/videos/2514328838787000/', + 'expanded': 'https://www.facebook.com/BuzzFeed/videos/2514328838787000/' + }], + 'link': 'https://www.facebook.com/BuzzFeed/videos/2514328838787000/', + 'postUrl': 'https://www.facebook.com/BuzzFeed/posts/10159284952995329', + 'subscriberCount': 11870805, + 'score': 5.962962962962963, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/70691299_728554827590593_3557958716256944128_n.mp4?_nc_cat=110&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQkMkRLolK3KyQb50CCZ1lJy-iI9Y7FbTL4UiJz6G2j0LVvsBHe1PKdJjT9804sPfys&_nc_ht=video.xx&oh=7827d58f3e48c091ca7eaf5a49bc280a&oe=5D769C33', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67695914_2514330655453485_3708524408340480000_n.jpg?_nc_cat=102&_nc_log=1&_nc_oc=AQkHho01TS09AnhvtVCMUZPEu6nBoy5iGEXYQFVC7uG9Eokr-hPZdB6b934Kdlo4wCw&_nc_ht=scontent.xx&oh=8b65fd6f342f1d51825e0647ce1d4e54&oe=5E0EACC5', + 'height': 720, + 'width': 720, + 'full': 
'https://scontent.xx.fbcdn.net/v/t15.5256-10/67695914_2514330655453485_3708524408340480000_n.jpg?_nc_cat=102&_nc_log=1&_nc_oc=AQkHho01TS09AnhvtVCMUZPEu6nBoy5iGEXYQFVC7uG9Eokr-hPZdB6b934Kdlo4wCw&_nc_ht=scontent.xx&oh=d28bfa966e5df214266a2859f2264032&oe=5E086471' + }], + 'statistics': { + 'actual': { + 'likeCount': 638, + 'shareCount': 113, + 'commentCount': 235, + 'loveCount': 282, + 'wowCount': 0, + 'hahaCount': 20, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 110, + 'shareCount': 34, + 'commentCount': 28, + 'loveCount': 26, + 'wowCount': 4, + 'hahaCount': 8, + 'sadCount': 4, + 'angryCount': 2, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 5862, + 'name': 'BuzzFeed', + 'handle': 'BuzzFeed', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/11222622_10153870407270329_4094729505669388790_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQlaEUp906VUUmeEPgfBCNmaczf4owSg6ehvwRebY_UVmSGVjDB_IUr4WGPgzRnptXU&_nc_ht=scontent.xx&oh=96b0a01485175975acdaeb06feb9d222&oe=5E06A54B', + 'subscriberCount': 11870650, + 'url': 'https://www.facebook.com/21898300328', + 'platform': 'Facebook', + 'platformId': '21898300328', + 'verified': True + }, + 'videoLengthMS': 184894 + }, { + 'id': 70159572807, + 'platformId': '34407447433_10156144115942434', + 'platform': 'Facebook', + 'date': '2019-09-07 19:43:00', + 'updated': '2019-09-08 00:29:54', + 'type': 'link', + 'title': 'Victim Allowed to Testify Through the 2,500 Personalities She Invented to Survive Childhood Sexual Abuse', + 'caption': 'jezebel.com', + 'description': 'An Australian survivor of horrific childhood sexual, physical, and mental abuse might be the first person in the world to testify in court through the personalties she invented in order to survive the trauma.\r\n', + 'message': '"Make no mistake, my dad caused my Multiple Personality Disorder," survivor Jeni Haynes told the court.', + 'expandedLinks': [{ + 'original': 'https://trib.al/hAZh3HX', + 'expanded': 'https://jezebel.com/victim-allowed-to-testify-through-the-2-500-personaliti-1837956427?rev=1567885072422&utm_medium=socialflow&utm_campaign=socialflow_jezebel_facebook&utm_source=jezebel_facebook&fbclid=IwAR37XfjKPTCo-byOlmC1hhdH-ka2zZmxQ24rDjMLXcGUhle1wMSkBviiZpE&fbclid=IwAR1C0S2pjDBEpenxMFVLXP0sm_iwVMG-zah8UZZ6SyRcMl9vD8_qbv029s0&fbclid=IwAR1o83O6e0Cw9V69W4gIhERu2UOcRJq-06qE8cqoMblOiZg0kgFwO4G8mK4' + }], + 'link': 'https://trib.al/hAZh3HX', + 'postUrl': 'https://www.facebook.com/Jezebel/posts/10156144115942434', + 'subscriberCount': 815764, + 'score': 5.844827586206897, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBEDDfWffmh6g5X&w=720&h=720&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--bxvolnQa--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fxqm0wmlfe2niquacansa.jpg&cfs=1&_nc_hash=AQBjyPpwDRCl8eHJ', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCrFKkF0CWTEaIs&url=https%3A%2F%2Fi.kinja-img.com%2Fgawker-media%2Fimage%2Fupload%2Fs--bxvolnQa--%2Fc_fill%2Cfl_progressive%2Cg_center%2Ch_900%2Cq_80%2Cw_1600%2Fxqm0wmlfe2niquacansa.jpg&_nc_hash=AQAZRCMo7DPfzgCP' + }], + 'statistics': { + 'actual': { + 'likeCount': 108, + 'shareCount': 37, + 'commentCount': 15, + 'loveCount': 6, + 'wowCount': 50, + 'hahaCount': 0, + 'sadCount': 120, + 'angryCount': 3, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 14, + 'shareCount': 6, + 'commentCount': 7, + 'loveCount': 7, + 'wowCount': 4, + 'hahaCount': 5, + 
'sadCount': 3, + 'angryCount': 12, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6753, + 'name': 'Jezebel', + 'handle': 'Jezebel', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10632833_10152652146387434_9205889665163163075_n.png?_nc_cat=1&_nc_oc=AQmE3moAD_e4DDH0Qk-IkkzGJ36IXDD-O29mmUauemxpi5JLbY-oMjPMeCglmwSb0Rs&_nc_ht=scontent.xx&oh=d7afae2d39ef36c76291f53c416d6c76&oe=5E0F900A', + 'subscriberCount': 815764, + 'url': 'https://www.facebook.com/34407447433', + 'platform': 'Facebook', + 'platformId': '34407447433', + 'verified': True + } + }, { + 'id': 70163771159, + 'platformId': '32109457015_10156178850177016', + 'platform': 'Facebook', + 'date': '2019-09-07 21:02:21', + 'updated': '2019-09-08 00:23:22', + 'type': 'link', + 'title': 'Ahead of Standing Ovation at New Hampshire Democratic Convention, Sanders Camp Announces Endorsements From 53 State Dems', + 'caption': 'commondreams.org', + 'description': '"This latest round of endorsements shows we are not only retaining and engaging supporters from 2016, but building new support from a broad swath of leaders from around the state."', + 'message': 'A big few days for the Vermont senator.', + 'expandedLinks': [{ + 'original': 'https://www.commondreams.org/news/2019/09/07/ahead-standing-ovation-new-hampshire-democratic-convention-sanders-camp-announces-0', + 'expanded': 'https://www.commondreams.org/news/2019/09/07/ahead-standing-ovation-new-hampshire-democratic-convention-sanders-camp-announces-0' + }], + 'link': 'https://www.commondreams.org/news/2019/09/07/ahead-standing-ovation-new-hampshire-democratic-convention-sanders-camp-announces-0', + 'postUrl': 'https://www.facebook.com/commondreams.org/posts/10156178850177016', + 'subscriberCount': 366669, + 'score': 5.829059829059829, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDKBlgliFGvmLnF&w=500&h=500&url=https%3A%2F%2Fwww.commondreams.org%2Fsites%2Fdefault%2Ffiles%2Fheadline%2Fthumbs%2Fsanders_0.png&cfs=1&sx=347&sy=0&sw=500&sh=500&_nc_hash=AQDwJC-1_y7Jjb-K', + 'height': 500, + 'width': 500, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDw3ov_HaM2oZ-I&url=https%3A%2F%2Fwww.commondreams.org%2Fsites%2Fdefault%2Ffiles%2Fheadline%2Fthumbs%2Fsanders_0.png&_nc_hash=AQA9h2rZgQp5Bf9N' + }], + 'statistics': { + 'actual': { + 'likeCount': 365, + 'shareCount': 143, + 'commentCount': 22, + 'loveCount': 149, + 'wowCount': 0, + 'hahaCount': 3, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 32, + 'shareCount': 30, + 'commentCount': 8, + 'loveCount': 7, + 'wowCount': 4, + 'hahaCount': 3, + 'sadCount': 9, + 'angryCount': 24, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 9840, + 'name': 'CommonDreams', + 'handle': 'commondreams.org', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/10469767_10152172973972016_8063428021861554001_n.jpg?_nc_cat=103&_nc_oc=AQlnGcnqptQQZZC2ssw_mVUJY3OL5CzMA_2hp5GQtIg_0HMCwMmn9q28KrUoNRmgbtU&_nc_ht=scontent.xx&oh=71d9a7a103ab2f477e27840eb92ac030&oe=5E040D0C', + 'subscriberCount': 366664, + 'url': 'https://www.facebook.com/32109457015', + 'platform': 'Facebook', + 'platformId': '32109457015', + 'verified': True + } + }, { + 'id': 70158517493, + 'platformId': '10513336322_10158174007266323', + 'platform': 'Facebook', + 'date': '2019-09-07 19:11:30', + 'updated': '2019-09-08 00:42:26', + 'type': 'link', + 'title': "'I refuse to die in here': the marine who survived two tours and is now fighting deportation", + 'caption': 
'theguardian.com', + 'description': 'In his 21 months of detention, Jose Segovia Benitez says he’s been denied critical treatment for his PTSD and heart condition', + 'message': '"I try to hold onto any kind of strength I have, but I’m not doing well mentally, emotionally, physically.”', + 'expandedLinks': [{ + 'original': 'https://www.theguardian.com/us-news/2019/sep/06/i-refuse-to-die-in-here-the-marine-who-survived-two-tours-and-is-now-fighting-deportation?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567867472', + 'expanded': 'https://www.theguardian.com/us-news/2019/sep/06/i-refuse-to-die-in-here-the-marine-who-survived-two-tours-and-is-now-fighting-deportation?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567867472' + }], + 'link': 'https://www.theguardian.com/us-news/2019/sep/06/i-refuse-to-die-in-here-the-marine-who-survived-two-tours-and-is-now-fighting-deportation?CMP=fb_gu&utm_medium=Social&utm_source=Facebook#Echobox=1567867472', + 'postUrl': 'https://www.facebook.com/theguardian/posts/10158174007266323', + 'subscriberCount': 8186083, + 'score': 5.7974683544303796, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQC9TU3AzHWjm7pP&w=630&h=630&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F42943b55dd663362b3a2eebf62397e83e7e6931e%2F0_215_3200_1920%2Fmaster%2F3200.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3Df38d9b9d9f1c0a1c34aabd72cc8836ae&cfs=1&sx=90&sy=0&sw=630&sh=630&_nc_hash=AQB0G9pHPr3Yu-eW', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCgxJMwqaqfvKmo&url=https%3A%2F%2Fi.guim.co.uk%2Fimg%2Fmedia%2F42943b55dd663362b3a2eebf62397e83e7e6931e%2F0_215_3200_1920%2Fmaster%2F3200.jpg%3Fwidth%3D1200%26height%3D630%26quality%3D85%26auto%3Dformat%26fit%3Dcrop%26overlay-align%3Dbottom%252Cleft%26overlay-width%3D100p%26overlay-base64%3DL2ltZy9zdGF0aWMvb3ZlcmxheXMvdGctZGVmYXVsdC5wbmc%26enable%3Dupscale%26s%3Df38d9b9d9f1c0a1c34aabd72cc8836ae&_nc_hash=AQA3ZYBOq8NMQ35O' + }], + 'statistics': { + 'actual': { + 'likeCount': 217, + 'shareCount': 456, + 'commentCount': 114, + 'loveCount': 6, + 'wowCount': 38, + 'hahaCount': 5, + 'sadCount': 420, + 'angryCount': 576, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 133, + 'shareCount': 50, + 'commentCount': 63, + 'loveCount': 12, + 'wowCount': 9, + 'hahaCount': 27, + 'sadCount': 7, + 'angryCount': 15, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 5740, + 'name': 'The Guardian', + 'handle': 'theguardian', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/46160148_10157340584076323_3990431626264838144_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkKD6tb0oraHl_Qq9dA1S51ktyWhE9lPo7udOrFCRkfCctJldfDrwPVn7PcSDSY5Sc&_nc_ht=scontent.xx&oh=8c51a127f7d06b002a6fcba57abe5181&oe=5DFDE22E', + 'subscriberCount': 8186263, + 'url': 'https://www.facebook.com/10513336322', + 'platform': 'Facebook', + 'platformId': '10513336322', + 'verified': True + } + }, { + 'id': 70170343869, + 'platformId': '123624513983_10157820072718984', + 'platform': 'Facebook', + 'date': '2019-09-07 23:00:02', + 'updated': '2019-09-08 00:38:24', + 'type': 'link', + 'title': 'Age 8 Boy Fights Off Mountain Lion After It Bit Him in Head and Dragged His Body Down Hill', + 'caption': 'westernjournal.com', + 'description': 'Pike Carlson, 8, fought back against a mountain lion who attacked and bit 
him on the face while he was playing outside in his backyard.', + 'message': "The little boy knew he had to fight back when the 65-pound mountain lion began to attack. 'I was just punching, trying to grab anything that I can, like a stick,' he said. 'I did find a stick and I tried to get it in the eye but soon the stick snapped.'", + 'expandedLinks': [{ + 'original': 'https://www.westernjournal.com/age-8-boy-fights-off-mountain-lion-bit-head-dragged-body-hill/?utm_source=facebook&utm_medium=westernjournalism&utm_content=2019-09-07&utm_campaign=manualpost', + 'expanded': 'https://www.westernjournal.com/age-8-boy-fights-off-mountain-lion-bit-head-dragged-body-hill/?utm_source=facebook&utm_medium=westernjournalism&utm_content=2019-09-07&utm_campaign=manualpost' + }], + 'link': 'https://www.westernjournal.com/age-8-boy-fights-off-mountain-lion-bit-head-dragged-body-hill/?utm_source=facebook&utm_medium=westernjournalism&utm_content=2019-09-07&utm_campaign=manualpost', + 'postUrl': 'https://www.facebook.com/WesternJournal/posts/10157820072718984', + 'subscriberCount': 5185113, + 'score': 5.781818181818182, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQC3rSRZQb_RMo2s&w=720&h=720&url=https%3A%2F%2Fwww.westernjournal.com%2Fwp-content%2Fuploads%2F2019%2F09%2FUntitled-design-7.jpg&cfs=1&_nc_hash=AQCFUuE0zmdVLXCn', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAtGTuiuTsPrguo&url=https%3A%2F%2Fwww.westernjournal.com%2Fwp-content%2Fuploads%2F2019%2F09%2FUntitled-design-7.jpg&_nc_hash=AQBrZ83xlVP9Nzno' + }], + 'statistics': { + 'actual': { + 'likeCount': 620, + 'shareCount': 229, + 'commentCount': 196, + 'loveCount': 53, + 'wowCount': 333, + 'hahaCount': 7, + 'sadCount': 151, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 84, + 'shareCount': 49, + 'commentCount': 55, + 'loveCount': 8, + 'wowCount': 13, + 'hahaCount': 18, + 'sadCount': 8, + 'angryCount': 40, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 93420, + 'name': 'The Western Journal', + 'handle': 'WesternJournal', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49664345_10157205261148984_1256195277434388480_n.png?_nc_cat=1&_nc_oc=AQkUo1GJrlGqxXcfjFxGkOcXookw_tgn8qATXCSI0ICt6sibuBdTtyIuuWj9iPLw5ZM&_nc_ht=scontent.xx&oh=bf010f921f678fbb0032a465900b5f24&oe=5DF8F16D', + 'subscriberCount': 5184899, + 'url': 'https://www.facebook.com/123624513983', + 'platform': 'Facebook', + 'platformId': '123624513983', + 'verified': True + } + }, { + 'id': 70160309952, + 'platformId': '338028696036_10158536072151037', + 'platform': 'Facebook', + 'date': '2019-09-07 20:00:01', + 'updated': '2019-09-08 00:27:38', + 'type': 'link', + 'title': 'Bernie Sanders joins LeBron James in support of bill allowing college athletes to be paid', + 'caption': 'sports.yahoo.com', + 'description': 'A bill awaiting vote in the California state assembly could trigger an upheaval in NCAA athletics.', + 'message': '"College athletes are workers. Pay them. 
"', + 'expandedLinks': [{ + 'original': 'https://news.yahoo.com/lebron-james-bernie-sanders-california-ncaa-034858749.html', + 'expanded': 'https://news.yahoo.com/lebron-james-bernie-sanders-california-ncaa-034858749.html' + }], + 'link': 'https://news.yahoo.com/lebron-james-bernie-sanders-california-ncaa-034858749.html', + 'postUrl': 'https://www.facebook.com/yahoonews/posts/10158536072151037', + 'subscriberCount': 7866135, + 'score': 5.771844660194175, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQChKNVaz3GsOVDZ&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73e65dca9a410594352803%2F72d18ae212954572d6739405c59d6cc2d7369ee6_96a348701633291e228c01c4c0f51215419bd124_facebook&cfs=1&_nc_hash=AQDTVopnB73UtOAk', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDggKqkAAIXLAVB&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73e65dca9a410594352803%2F72d18ae212954572d6739405c59d6cc2d7369ee6_96a348701633291e228c01c4c0f51215419bd124_facebook&_nc_hash=AQAmylurbIXZ1Tbw' + }], + 'statistics': { + 'actual': { + 'likeCount': 627, + 'shareCount': 146, + 'commentCount': 233, + 'loveCount': 70, + 'wowCount': 5, + 'hahaCount': 77, + 'sadCount': 1, + 'angryCount': 30, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 55, + 'shareCount': 35, + 'commentCount': 50, + 'loveCount': 9, + 'wowCount': 9, + 'hahaCount': 24, + 'sadCount': 9, + 'angryCount': 15, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 16337, + 'name': 'Yahoo News', + 'handle': 'yahoonews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1234558_10151822723996037_1232781499_n.jpg?_nc_cat=1&_nc_oc=AQkPmfbCJFc9Ll_w6v-FBqGGulHvLsK6m9J20HAPS45YGyFGlkUw6ZZKS6yuaKxI_V0&_nc_ht=scontent.xx&oh=e2ffaa2bbb04dd746da7d26542134656&oe=5DFF9BED', + 'subscriberCount': 7865795, + 'url': 'https://www.facebook.com/338028696036', + 'platform': 'Facebook', + 'platformId': '338028696036', + 'verified': True + } + }, { + 'id': 70160979151, + 'platformId': '155869377766434_3572675879419083', + 'platform': 'Facebook', + 'date': '2019-09-07 20:02:05', + 'updated': '2019-09-08 00:20:11', + 'type': 'link', + 'title': 'Thousands listed as missing in Bahamas in Hurricane Dorian’s wake', + 'caption': 'nbcnews.com', + 'description': ' ', + 'message': 'Thousands of people are desperately trying to find loved ones in the Bahamas.', + 'expandedLinks': [{ + 'original': 'https://nbcnews.to/2Lvx4h8', + 'expanded': 'https://www.nbcnews.com/news/world/thousands-listed-missing-bahamas-hurricane-dorian-s-wake-n1050791?cid=sm_npd_nn_fb_ma' + }], + 'link': 'https://nbcnews.to/2Lvx4h8', + 'postUrl': 'https://www.facebook.com/NBCNews/posts/3572675879419083', + 'subscriberCount': 9970622, + 'score': 5.688976377952756, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD6stbq-rFP7Jda&w=720&h=720&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&cfs=1&_nc_hash=AQBVY5Go-4zF-tlS', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCocMqpL-yoqFsO&url=https%3A%2F%2Fmedia1.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F3000001%2F190906-bahamas-aftermath-dorian-al-1111_fbd341856b3fa8ce3a08a04f0fca9b14.nbcnews-fp-1200-630.jpg&_nc_hash=AQDssZadqERvIDEf' + }], + 'statistics': { + 'actual': { + 'likeCount': 75, + 'shareCount': 375, + 'commentCount': 59, + 
'loveCount': 1, + 'wowCount': 43, + 'hahaCount': 1, + 'sadCount': 888, + 'angryCount': 3, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 61, + 'shareCount': 50, + 'commentCount': 54, + 'loveCount': 10, + 'wowCount': 19, + 'hahaCount': 19, + 'sadCount': 21, + 'angryCount': 20, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13889, + 'name': 'NBC News', + 'handle': 'NBCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/58460954_3259154034104604_4667908299973197824_n.png?_nc_cat=1&_nc_oc=AQkP72-xbAw6uUN-KZG8hLfS-bT5o6BRIMSNURKuXBbEhrFa7sT75fvZfTBZDVa21CU&_nc_ht=scontent.xx&oh=ddb1e61de6dabbf61e903f59efde1f0c&oe=5DF7A653', + 'subscriberCount': 9970540, + 'url': 'https://www.facebook.com/155869377766434', + 'platform': 'Facebook', + 'platformId': '155869377766434', + 'verified': True + } + }, { + 'id': 70170621365, + 'platformId': '10498053716_10156935398183717', + 'platform': 'Facebook', + 'date': '2019-09-07 23:00:00', + 'updated': '2019-09-08 00:44:10', + 'type': 'link', + 'title': "FAIL: Toronto Chick-fil-A Protesters Ignored as They Stage 'Die-In' Demonstration", + 'caption': 'mrctv.org', + 'description': 'You know the best thing about the\xa0grand opening of a Chick-fil-A?\xa0', + 'message': 'VIDEO: Protesters held a "die-in" in front of Toronto\'s first Chick-fil-A. Patrons essentially ignored them by simply walking over or around them to get their food.', + 'expandedLinks': [{ + 'original': 'https://www.mrctv.org/blog/toronto-chick-fil-protesters-ignored-they-stage-die-demonstration', + 'expanded': 'https://www.mrctv.org/blog/toronto-chick-fil-protesters-ignored-they-stage-die-demonstration' + }], + 'link': 'https://www.mrctv.org/blog/toronto-chick-fil-protesters-ignored-they-stage-die-demonstration', + 'postUrl': 'https://www.facebook.com/mediaresearchcenter/posts/10156935398183717', + 'subscriberCount': 1853276, + 'score': 5.529411764705882, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCo1ARLaetR4aJ5&w=720&h=720&url=https%3A%2F%2Fcdn.mrctv.org%2Ffiles%2Fstyles%2Fmedium%2Fs3%2F2019-09%2Flooney%2520bags.png%3Fh%3D7d1f3709&cfs=1&_nc_hash=AQD7e20NjdW5a_7I', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCIixan6kLeR8U_&url=https%3A%2F%2Fcdn.mrctv.org%2Ffiles%2Fstyles%2Fmedium%2Fs3%2F2019-09%2Flooney%2520bags.png%3Fh%3D7d1f3709&_nc_hash=AQAL8Q6mRJoTX-E7' + }], + 'statistics': { + 'actual': { + 'likeCount': 167, + 'shareCount': 106, + 'commentCount': 228, + 'loveCount': 16, + 'wowCount': 4, + 'hahaCount': 504, + 'sadCount': 1, + 'angryCount': 8, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 17, + 'shareCount': 22, + 'commentCount': 61, + 'loveCount': 4, + 'wowCount': 5, + 'hahaCount': 42, + 'sadCount': 4, + 'angryCount': 32, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 327932, + 'name': 'Media Research Center', + 'handle': 'mediaresearchcenter', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/22814117_10155267787348717_9035099093135610710_n.png?_nc_cat=1&_nc_oc=AQnlBU3OCfeS-5QWg2v10Je2qqRgOr8VQS088-pc6gM4VZ_wGRCLBF_h5ObNobn7SOE&_nc_ht=scontent.xx&oh=4444f21775a6df49bc0c533f492d5953&oe=5E0B587B', + 'subscriberCount': 1853272, + 'url': 'https://www.facebook.com/10498053716', + 'platform': 'Facebook', + 'platformId': '10498053716', + 'verified': True + } + }, { + 'id': 70158941544, + 'platformId': '618786471475708_2644759942211674', + 'platform': 'Facebook', + 'date': '2019-09-07 19:30:00', + 'updated': '2019-09-08 00:30:47', + 
'type': 'native_video', + 'message': 'This bear took a nap in a restaurant’s bathroom — and slept through the staff’s attempts to get it out 🐻💤😂', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/BuzzFeedNews/videos/233829937513838/', + 'expanded': 'https://www.facebook.com/BuzzFeedNews/videos/233829937513838/' + }], + 'link': 'https://www.facebook.com/BuzzFeedNews/videos/233829937513838/', + 'postUrl': 'https://www.facebook.com/BuzzFeedNews/posts/2644759942211674', + 'subscriberCount': 3015477, + 'score': 5.408888888888889, + 'media': [{ + 'type': 'video', + 'url': 'https://video.xx.fbcdn.net/v/t42.9040-2/70829649_1369510883210885_6464376971089412096_n.mp4?_nc_cat=108&efg=eyJ2ZW5jb2RlX3RhZyI6InN2ZV9zZCJ9&_nc_log=1&_nc_oc=AQkHMJ6YyNp5AfqE41JKIISreqFTOyyA9I_-UFm2Chcl-8t99lw6g8x6ckxMudx1dKg&_nc_ht=video.xx&oh=a383273ff1f2a898fbde4a5d47bef863&oe=5D75AC2F', + 'height': 0, + 'width': 0 + }, { + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/p720x720/67264768_233830367513795_1857633506536980480_n.jpg?_nc_cat=109&_nc_log=1&_nc_oc=AQkL-NnpOehPSuLPO1pX0UitjWUMHRXEc8dlumSIUOlEmjWlnFFFM0DGVf5XOIusMrw&_nc_ht=scontent.xx&oh=13a862e4afac730aca26503d79ea752d&oe=5E09E16D', + 'height': 720, + 'width': 720, + 'full': 'https://scontent.xx.fbcdn.net/v/t15.5256-10/67264768_233830367513795_1857633506536980480_n.jpg?_nc_cat=109&_nc_log=1&_nc_oc=AQkL-NnpOehPSuLPO1pX0UitjWUMHRXEc8dlumSIUOlEmjWlnFFFM0DGVf5XOIusMrw&_nc_ht=scontent.xx&oh=7303e4e196b9d46aa13a0c6638d20567&oe=5DF4E136' + }], + 'statistics': { + 'actual': { + 'likeCount': 447, + 'shareCount': 223, + 'commentCount': 173, + 'loveCount': 132, + 'wowCount': 35, + 'hahaCount': 204, + 'sadCount': 3, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 104, + 'shareCount': 32, + 'commentCount': 19, + 'loveCount': 38, + 'wowCount': 5, + 'hahaCount': 21, + 'sadCount': 4, + 'angryCount': 2, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 18756, + 'name': 'BuzzFeed News', + 'handle': 'BuzzFeedNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/37324661_1987747984579543_6544772647132069888_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQl4xuZMtXJ6qFqyRhwKzfdvsAYA1JGI1ajz4X8q4bIHiObnrMGyXWEFiDcVxaVlrgM&_nc_ht=scontent.xx&oh=3a3c2ae104e50e8860b8dcf413215500&oe=5DFB7022', + 'subscriberCount': 3017031, + 'url': 'https://www.facebook.com/618786471475708', + 'platform': 'Facebook', + 'platformId': '618786471475708', + 'verified': True + }, + 'videoLengthMS': 55891 + }, { + 'id': 70176047384, + 'platformId': '273864989376427_2990324384397127', + 'platform': 'Facebook', + 'date': '2019-09-08 00:24:18', + 'updated': '2019-09-08 00:44:08', + 'type': 'link', + 'title': "Trump says he's calling off Afghanistan peace talks and secret meeting he had planned with Taliban", + 'caption': 'nbcnews.com', + 'description': '"Unbeknownst to almost everyone, the major Taliban leaders ... were going to secretly meet with me at Camp David on Sunday," Trump tweeted.', + 'message': 'BREAKING: Days ahead of 9/11 anniversary, Pres. 
Trump announces that he was set to hold secret talks with the Taliban at Camp David in the US this weekend but he has called off the talks after a US service member was killed in a suicide attack in Kabul.', + 'expandedLinks': [{ + 'original': 'https://on.msnbc.com/34zdcTc', + 'expanded': 'https://www.nbcnews.com/news/world/trump-says-he-s-canceling-afghanistan-peace-talks-secret-meeting-n1051141?cid=sm_npd_ms_fb_ma' + }], + 'link': 'https://on.msnbc.com/34zdcTc', + 'postUrl': 'https://www.facebook.com/msnbc/posts/2990324384397127', + 'subscriberCount': 2290512, + 'score': 5.383333333333334, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCNOPbDFAkJaFnF&w=630&h=630&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&cfs=1&sx=195&sy=0&sw=630&sh=630&_nc_hash=AQBScacjujSkq3Mk', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD2KTNNygZQ_OI2&url=https%3A%2F%2Fmedia2.s-nbcnews.com%2Fj%2Fnewscms%2F2019_36%2F2996636%2F190904-donald-trump-ew-319p_fa205db6b34b6641eb4336a3bcfc21cb.nbcnews-fp-1200-630.jpg&_nc_hash=AQAnWtxyQdPBskf5' + }], + 'statistics': { + 'actual': { + 'likeCount': 55, + 'shareCount': 155, + 'commentCount': 272, + 'loveCount': 1, + 'wowCount': 85, + 'hahaCount': 228, + 'sadCount': 9, + 'angryCount': 164, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 40, + 'shareCount': 21, + 'commentCount': 47, + 'loveCount': 3, + 'wowCount': 7, + 'hahaCount': 16, + 'sadCount': 11, + 'angryCount': 35, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 8324, + 'name': 'MSNBC', + 'handle': 'msnbc', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2', + 'subscriberCount': 2290512, + 'url': 'https://www.facebook.com/273864989376427', + 'platform': 'Facebook', + 'platformId': '273864989376427', + 'verified': True + } + }, { + 'id': 70164500985, + 'platformId': '1435071773455316_2255755588053593', + 'platform': 'Facebook', + 'date': '2019-09-07 21:16:33', + 'updated': '2019-09-08 00:27:30', + 'type': 'link', + 'title': 'Tony Perkins On Mass Shootings: ‘The Problem Is Not The Absence Of Laws; It’s An Absence Of Morality’', + 'caption': 'dailywire.com', + 'description': 'On September 1, former police officer and President of the Family Research Council (FRC) Tony Perkins appeared on Fox News to discuss the shooting in Odessa, Texas.', + 'message': '"We’ve driven religion from our public life, and we\'re shocked that we no longer have morality and we no longer value human life."', + 'expandedLinks': [{ + 'original': 'http://dlvr.it/RCgJkW', + 'expanded': 'https://www.dailywire.com/news/51501/tony-perkins-mass-shootings-problem-not-absence-frank-camp?utm_source=facebook&utm_medium=social&utm_campaign=dwbrand' + }], + 'link': 'http://dlvr.it/RCgJkW', + 'postUrl': 'https://www.facebook.com/DailyWire/posts/2255755588053593', + 'subscriberCount': 1934539, + 'score': 5.293103448275862, + 'media': [{ + 'type': 'photo', + 'url': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQB1Mu1MvMS8dD0w&w=720&h=720&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fd5f463cb-13ac-4cf1-869e-54c62197ef38.jpeg%3Fitok%3Dkjy8V7cH&cfs=1&_nc_hash=AQBtAC5c8_9jGNKY', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDnXhV0fMoQ4Rf2&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fd5f463cb-13ac-4cf1-869e-54c62197ef38.jpeg%3Fitok%3Dkjy8V7cH&_nc_hash=AQBdy8C2aVF71Aox' + }], + 'statistics': { + 'actual': { + 'likeCount': 391, + 'shareCount': 133, + 'commentCount': 40, + 'loveCount': 23, + 'wowCount': 0, + 'hahaCount': 0, + 'sadCount': 27, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 33, + 'shareCount': 13, + 'commentCount': 24, + 'loveCount': 3, + 'wowCount': 6, + 'hahaCount': 17, + 'sadCount': 4, + 'angryCount': 16, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 650861, + 'name': 'Daily Wire', + 'handle': 'DailyWire', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27655057_1815794295383060_2228253987427136016_n.png?_nc_cat=1&_nc_oc=AQm_uPD8ZwlgfmUIjiJBxewrWpNXIPkUpDdGdWdkYu9LXrRzIuUYx8pGdp5Kmcz1HU8&_nc_ht=scontent.xx&oh=ab8e2768dce63a6200349ce2d7dc8a11&oe=5DF6BB9F', + 'subscriberCount': 1934601, + 'url': 'https://www.facebook.com/1435071773455316', + 'platform': 'Facebook', + 'platformId': '1435071773455316', + 'verified': True + } + }, { + 'id': 70161018459, + 'platformId': '10716057521_10156407106532522', + 'platform': 'Facebook', + 'date': '2019-09-07 20:00:00', + 'updated': '2019-09-08 00:22:00', + 'type': 'link', + 'title': "The House is investigating why a routine military trip to the Middle East stopped at Trump's resort in Scotland", + 'caption': 'theweek.com', + 'description': "The House Oversight Committee has been investigating why the crew of a C-17 military transport plane making a routine trip from the United States to Kuwait to deliver supplies stayed at President Trump's Turnberry resort in Scotland, Politico reports. 
The House panel reportedly sent a letter to the....", + 'message': '"The committee will be forced to consider alternative steps if the Pentagon does not begin complying voluntarily in the coming days.":', + 'expandedLinks': [{ + 'original': 'https://theweek.com/speedreads/863494/house-investigating-why-routine-military-trip-middle-east-stopped-trumps-resort-scotland?utm_source=links&utm_medium=website&utm_campaign=facebook', + 'expanded': 'https://theweek.com/speedreads/863494/house-investigating-why-routine-military-trip-middle-east-stopped-trumps-resort-scotland?utm_source=links&utm_medium=website&utm_campaign=facebook' + }], + 'link': 'https://theweek.com/speedreads/863494/house-investigating-why-routine-military-trip-middle-east-stopped-trumps-resort-scotland?utm_source=links&utm_medium=website&utm_campaign=facebook', + 'postUrl': 'https://www.facebook.com/theweek/posts/10156407106532522', + 'subscriberCount': 419619, + 'score': 5.277777777777778, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAL4RYh8-R5KEjD&w=720&h=720&url=https%3A%2F%2Fimages.theweek.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Ftw_image_6_4%2Fpublic%2Fgettyimages-998481948.jpg%3Fitok%3D2NNqgY23&cfs=1&_nc_hash=AQDlZL4Hgb09INsh', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAYZfhWWCLhvXQO&url=https%3A%2F%2Fimages.theweek.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Ftw_image_6_4%2Fpublic%2Fgettyimages-998481948.jpg%3Fitok%3D2NNqgY23&_nc_hash=AQDOJVAtl325JCyk' + }], + 'statistics': { + 'actual': { + 'likeCount': 136, + 'shareCount': 81, + 'commentCount': 42, + 'loveCount': 0, + 'wowCount': 37, + 'hahaCount': 8, + 'sadCount': 4, + 'angryCount': 72, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 18, + 'shareCount': 9, + 'commentCount': 13, + 'loveCount': 6, + 'wowCount': 5, + 'hahaCount': 10, + 'sadCount': 4, + 'angryCount': 7, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 15634, + 'name': 'THE WEEK', + 'handle': 'theweek', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/21462349_10154703764612522_6251347795371418727_n.jpg?_nc_cat=110&_nc_oc=AQlbKAutDhFEqh0Qx-B7AQHdixiuSL6cSzNkUlV8rK3NiEkp4i1D4Fr32CCce7z6lUA&_nc_ht=scontent.xx&oh=70ad5528b8f299f3aff2b696de103f35&oe=5E0AC355', + 'subscriberCount': 419619, + 'url': 'https://www.facebook.com/10716057521', + 'platform': 'Facebook', + 'platformId': '10716057521', + 'verified': True + } + }, { + 'id': 70162137656, + 'platformId': '519305544814653_2705955072816345', + 'platform': 'Facebook', + 'date': '2019-09-07 20:30:00', + 'updated': '2019-09-08 00:43:12', + 'type': 'photo', + 'expandedLinks': [{ + 'original': 'https://www.facebook.com/theconservativetribune/photos/a.520106441401230/2705954829483036/?type=3', + 'expanded': 'https://www.facebook.com/theconservativetribune/photos/a.520106441401230/2705954829483036/?type=3' + }], + 'link': 'https://www.facebook.com/theconservativetribune/photos/a.520106441401230/2705954829483036/?type=3', + 'postUrl': 'https://www.facebook.com/theconservativetribune/posts/2705955072816345', + 'subscriberCount': 4272313, + 'score': 5.141030392883618, + 'media': [{ + 'type': 'photo', + 'url': 'https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69969764_2705954832816369_3306078688440745984_n.jpg?_nc_cat=107&_nc_oc=AQmIHRqDlQ1NjP1w0QuHG0ik274DNEJIqy4_inibBtrShHzoLkx0TBFnDKT3s4KJbjI&_nc_ht=scontent.xx&oh=b8f70055db120a5004f777bef149643a&oe=5DC9826E', + 'height': 720, + 'width': 720, + 'full': 
'https://scontent.xx.fbcdn.net/v/t1.0-9/p720x720/69969764_2705954832816369_3306078688440745984_n.jpg?_nc_cat=107&_nc_oc=AQmIHRqDlQ1NjP1w0QuHG0ik274DNEJIqy4_inibBtrShHzoLkx0TBFnDKT3s4KJbjI&_nc_ht=scontent.xx&oh=b8f70055db120a5004f777bef149643a&oe=5DC9826E' + }], + 'statistics': { + 'actual': { + 'likeCount': 16507, + 'shareCount': 8305, + 'commentCount': 803, + 'loveCount': 1967, + 'wowCount': 30, + 'hahaCount': 102, + 'sadCount': 7, + 'angryCount': 20, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 3901, + 'shareCount': 948, + 'commentCount': 249, + 'loveCount': 197, + 'wowCount': 22, + 'hahaCount': 34, + 'sadCount': 21, + 'angryCount': 24, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 48733, + 'name': 'Conservative Tribune by WJ', + 'handle': 'theconservativetribune', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/46353000_2202571529821371_2816384259860725760_n.png?_nc_cat=1&_nc_oc=AQmyLmtQSJjNV6pRGGi1jlDx51XV7ULbxHYoibyNBKmronK_dpS07OVljvF5-BdX07s&_nc_ht=scontent.xx&oh=eeade969630115fc0c1ec64d4a462e0f&oe=5DF58CE0', + 'subscriberCount': 4272095, + 'url': 'https://www.facebook.com/519305544814653', + 'platform': 'Facebook', + 'platformId': '519305544814653', + 'verified': True + } + }, { + 'id': 70169831725, + 'platformId': '119984188013847_2736300513048855', + 'platform': 'Facebook', + 'date': '2019-09-07 22:47:53', + 'updated': '2019-09-08 00:45:59', + 'type': 'link', + 'title': 'Buttigieg: God will punish us for global warming - WND', + 'caption': 'wnd.com', + 'description': "This argument should turn the staunchest conservative into a daily campaigner against global warming, or climate change as it's called since the warming stopped.", + 'message': '\'Mayor Pete\' becomes \'Preacher Pete\' – warns of God\'s coming judgment Democrat presidential candidate Pete Buttigieg has made his mark in the crowded field by touting his homosexuality and "marriage" to another man, his support of abortion up to the moment of birth and his attacks on Vice President Mike Pence and conservative Christians. In his latest move against the faithful, he has taken on the role of theologian and prophet, warning of God\'s coming judgment against this generation. For what sin, you ask. 
Well, Prophet Pete has just made up a new one ...', + 'expandedLinks': [{ + 'original': 'https://www.wnd.com/2019/09/buttigieg-god-will-punish-us-global-warming/', + 'expanded': 'https://www.wnd.com/2019/09/buttigieg-god-will-punish-us-global-warming/' + }], + 'link': 'https://www.wnd.com/2019/09/buttigieg-god-will-punish-us-global-warming/', + 'postUrl': 'https://www.facebook.com/WNDNews/posts/2736300513048855', + 'subscriberCount': 847184, + 'score': 5.123376623376624, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCDOi5uHIJ3tq_P&w=300&h=300&url=https%3A%2F%2Fwww.wnd.com%2Fwp-content%2Fuploads%2F2019%2F04%2Fmayor-pete44.jpg&cfs=1&sx=197&sy=0&sw=300&sh=300&_nc_hash=AQA6CnG84b2r7UwJ', + 'height': 300, + 'width': 300, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAKcW4hQUbLE4Nb&url=https%3A%2F%2Fwww.wnd.com%2Fwp-content%2Fuploads%2F2019%2F04%2Fmayor-pete44.jpg&_nc_hash=AQC4O0UNvIXSCQ9E' + }], + 'statistics': { + 'actual': { + 'likeCount': 59, + 'shareCount': 174, + 'commentCount': 701, + 'loveCount': 0, + 'wowCount': 25, + 'hahaCount': 408, + 'sadCount': 24, + 'angryCount': 187, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 83, + 'shareCount': 77, + 'commentCount': 52, + 'loveCount': 5, + 'wowCount': 11, + 'hahaCount': 12, + 'sadCount': 7, + 'angryCount': 61, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 816605, + 'name': 'WND', + 'handle': 'WNDNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/10616184_978685205477070_7301123703638589430_n.jpg?_nc_cat=110&_nc_oc=AQm5V5YpP7PucYw6lh5UcBTbvWDxAw3jNZpGGnOpem7RUhl7KQuT_0RFS9UItcAmqL8&_nc_ht=scontent.xx&oh=42799b825016837895356c7b53b45526&oe=5E0F6F64', + 'subscriberCount': 847147, + 'url': 'https://www.facebook.com/119984188013847', + 'platform': 'Facebook', + 'platformId': '119984188013847', + 'verified': False + } + }, { + 'id': 70170627801, + 'platformId': '273864989376427_2990195337743365', + 'platform': 'Facebook', + 'date': '2019-09-07 23:04:12', + 'updated': '2019-09-08 00:44:08', + 'type': 'link', + 'title': 'Pentagon takes money from military schools, more for border wall', + 'caption': 'msnbc.com', + 'description': 'The Donald Trump administration has stated plans to take more than $3.6 billion out of military projects including money that was supposed to pay for schools, and military bases, for a total of 127 projects put on hold, to divert monies to the U.S.-Mexico border wall. Joy Reid and her panel discuss.', + 'message': '"Republicans are discovering that their utter loyalty and utter obedience to Donald Trump will do absolutely nothing to stop him from stealing money out of their states… to build the border wall." 
- Joy Reid', + 'expandedLinks': [{ + 'original': 'https://on.msnbc.com/2LCCYNq', + 'expanded': 'https://www.msnbc.com/am-joy/watch/trump-admin-diverts-3-6b-in-military-funding-to-border-wall-68438085927?cid=sm_npd_ms_fb_ma&fbclid=IwAR3zIMmwFSYrPw7tXLvH09swHvwio_08X1N3e6olJ2VFajIqQzAhlcquifM' + }], + 'link': 'https://on.msnbc.com/2LCCYNq', + 'postUrl': 'https://www.facebook.com/msnbc/posts/2990195337743365', + 'subscriberCount': 2290452, + 'score': 5.096345514950166, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBmWxdfX-Y0YKeK&w=720&h=720&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_joy_trumpwall_190907_1920x1080.nbcnews-fp-1200-630.jpg&cfs=1&_nc_hash=AQCq-YM-F5lNiiu7', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCRqhvaoLdT2olR&url=https%3A%2F%2Fmedia11.s-nbcnews.com%2Fj%2FMSNBC%2FComponents%2FVideo%2F201909%2Fn_joy_trumpwall_190907_1920x1080.nbcnews-fp-1200-630.jpg&_nc_hash=AQCBPi5HM3ivKDck' + }], + 'statistics': { + 'actual': { + 'likeCount': 205, + 'shareCount': 195, + 'commentCount': 299, + 'loveCount': 7, + 'wowCount': 22, + 'hahaCount': 110, + 'sadCount': 42, + 'angryCount': 654, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 72, + 'shareCount': 40, + 'commentCount': 80, + 'loveCount': 6, + 'wowCount': 12, + 'hahaCount': 27, + 'sadCount': 16, + 'angryCount': 48, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 8324, + 'name': 'MSNBC', + 'handle': 'msnbc', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/15741035_1414682885294626_1846918595507309997_n.jpg?_nc_cat=1&_nc_oc=AQmNSDImiJ4dNS4a9BuTF3tFyF2W0xSOLxgQfdY6R_AXaZm8hkQc6XT-GWy5NIEe080&_nc_ht=scontent.xx&oh=968e2c2f1d76f19278ac5985b55af46d&oe=5E003BB2', + 'subscriberCount': 2290512, + 'url': 'https://www.facebook.com/273864989376427', + 'platform': 'Facebook', + 'platformId': '273864989376427', + 'verified': True + } + }, { + 'id': 70172309273, + 'platformId': '5281959998_10152010138044999', + 'platform': 'Facebook', + 'date': '2019-09-07 23:30:12', + 'updated': '2019-09-08 00:47:27', + 'type': 'link', + 'title': 'Trump Says He’s Called Off Negotiations With Taliban After Afghanistan Bombing', + 'caption': 'nytimes.com', + 'description': 'The president said he had canceled a secret meeting at Camp David with Taliban leaders and the president of Afghanistan.', + 'message': 'President Trump said on Saturday that he had canceled a secret meeting at Camp David with Taliban leaders and the president of Afghanistan and has called off negotiations with the Afghan insurgent group that were close to a peace agreement months in the making.', + 'expandedLinks': [{ + 'original': 'https://www.nytimes.com/2019/09/07/us/politics/trump-taliban-afghanistan.html', + 'expanded': 'https://www.nytimes.com/2019/09/07/us/politics/trump-taliban-afghanistan.html' + }], + 'link': 'https://www.nytimes.com/2019/09/07/us/politics/trump-taliban-afghanistan.html', + 'postUrl': 'https://www.facebook.com/nytimes/posts/10152010138044999', + 'subscriberCount': 16854203, + 'score': 5.086956521739131, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDGhnO9QS8cKTun&w=720&h=720&url=https%3A%2F%2Fstatic01.nyt.com%2Fnewsgraphics%2Fimages%2Ficons%2FdefaultPromoCrop.png&cfs=1&_nc_hash=AQBIS03W5Q8pqINR', + 'height': 720, + 'width': 720, + 'full': 
'https://external.xx.fbcdn.net/safe_image.php?d=AQBNcmcrufntg-Bu&url=https%3A%2F%2Fstatic01.nyt.com%2Fnewsgraphics%2Fimages%2Ficons%2FdefaultPromoCrop.png&_nc_hash=AQCcLkO5R7qvqmT3' + }], + 'statistics': { + 'actual': { + 'likeCount': 183, + 'shareCount': 214, + 'commentCount': 579, + 'loveCount': 1, + 'wowCount': 147, + 'hahaCount': 528, + 'sadCount': 18, + 'angryCount': 85, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 142, + 'shareCount': 52, + 'commentCount': 68, + 'loveCount': 10, + 'wowCount': 16, + 'hahaCount': 22, + 'sadCount': 16, + 'angryCount': 19, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 7132, + 'name': 'The New York Times', + 'handle': 'nytimes', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t34.0-1/p200x200/38987133_2766049203424553_1238434690_n.png?_nc_cat=1&_nc_log=1&_nc_oc=AQkaWRCuHf9GL6ACpzc33xhzk0PaoZZpZJjgHAUJqYB_x5SH2TI2LqBRTlosS59Dtlw&_nc_ht=scontent.xx&oh=6c30114417175d395e99d2e75167ad16&oe=5D765D57', + 'subscriberCount': 16854715, + 'url': 'https://www.facebook.com/5281959998', + 'platform': 'Facebook', + 'platformId': '5281959998', + 'verified': True + } + }, { + 'id': 70165217975, + 'platformId': '1435071773455316_2255764244719394', + 'platform': 'Facebook', + 'date': '2019-09-07 21:32:02', + 'updated': '2019-09-08 00:27:30', + 'type': 'link', + 'title': 'Democrats To Take Formal Steps On Impeachment Next Week', + 'caption': 'dailywire.com', + 'description': 'The Democrat-controlled House Judiciary Committee is set to vote next week on a resolution laying out the procedures that it will use for its investigation as they are reportedly considering moving to impeach President Donald Trump.', + 'message': '"It is expected to follow the precedent set in 1974 over the committee\'s procedures during then-President Richard Nixon\'s impeachment proceedings."', + 'expandedLinks': [{ + 'original': 'http://dlvr.it/RCgKr7', + 'expanded': 'https://www.dailywire.com/news/51495/breaking-democrats-take-formal-steps-impeachment-ryan-saavedra?utm_campaign=dwbrand' + }], + 'link': 'http://dlvr.it/RCgKr7', + 'postUrl': 'https://www.facebook.com/DailyWire/posts/2255764244719394', + 'subscriberCount': 1934539, + 'score': 5.064814814814815, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQAL34Moo0djcN3q&w=630&h=630&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fjerry_nadler.jpg%3Fitok%3D0q7hDEmL&cfs=1&sx=179&sy=0&sw=630&sh=630&_nc_hash=AQDqkzXDIf7GjDhh', + 'height': 630, + 'width': 630, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQD5qWTKc9agh4Wt&url=https%3A%2F%2Fwww.dailywire.com%2Fsites%2Fdefault%2Ffiles%2Fstyles%2Fopen_graph%2Fpublic%2Fuploads%2F2019%2F09%2Fjerry_nadler.jpg%3Fitok%3D0q7hDEmL&_nc_hash=AQA8SoamYZ0MfJAJ' + }], + 'statistics': { + 'actual': { + 'likeCount': 31, + 'shareCount': 19, + 'commentCount': 220, + 'loveCount': 3, + 'wowCount': 4, + 'hahaCount': 153, + 'sadCount': 5, + 'angryCount': 112, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 30, + 'shareCount': 13, + 'commentCount': 21, + 'loveCount': 3, + 'wowCount': 6, + 'hahaCount': 16, + 'sadCount': 4, + 'angryCount': 15, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 650861, + 'name': 'Daily Wire', + 'handle': 'DailyWire', + 'profileImage': 
'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/27655057_1815794295383060_2228253987427136016_n.png?_nc_cat=1&_nc_oc=AQm_uPD8ZwlgfmUIjiJBxewrWpNXIPkUpDdGdWdkYu9LXrRzIuUYx8pGdp5Kmcz1HU8&_nc_ht=scontent.xx&oh=ab8e2768dce63a6200349ce2d7dc8a11&oe=5DF6BB9F', + 'subscriberCount': 1934601, + 'url': 'https://www.facebook.com/1435071773455316', + 'platform': 'Facebook', + 'platformId': '1435071773455316', + 'verified': True + } + }, { + 'id': 70161881054, + 'platformId': '147772245840_10156546913410841', + 'platform': 'Facebook', + 'date': '2019-09-07 20:25:03', + 'updated': '2019-09-08 00:33:20', + 'type': 'link', + 'title': 'Kirstie Alley Calls Out Hollywood Hypocrites for Refusing to Work With Republicans', + 'caption': 'chicksonright.com', + 'description': "I remember Kirstie Alley best for It Takes Two (with Mary Kate and Ashley Olsen, age 9) and the TV movie Toothless, both of which came out in the mid-1990s. I couldn't tell you what she's been", + 'message': '🙌🙌🙌', + 'expandedLinks': [{ + 'original': 'https://www.chicksonright.com/opinion/2019/09/07/kirstie-alley-calls-out-hollywood-hypocrites-for-refusing-to-work-with-republicans/', + 'expanded': 'https://www.chicksonright.com/opinion/2019/09/07/kirstie-alley-calls-out-hollywood-hypocrites-for-refusing-to-work-with-republicans/' + }], + 'link': 'https://www.chicksonright.com/opinion/2019/09/07/kirstie-alley-calls-out-hollywood-hypocrites-for-refusing-to-work-with-republicans/', + 'postUrl': 'https://www.facebook.com/TheYoungCons/posts/10156546913410841', + 'subscriberCount': 999933, + 'score': 4.986301369863014, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQA-a89n7sUHEZ_k&w=720&h=720&url=https%3A%2F%2Fwww.chicksonright.com%2Fopinion%2Fwp-content%2Fuploads%2Fsites%2F6%2F2019%2F09%2FScreen-Shot-2019-09-07-at-11.57.14-AM.png&cfs=1&_nc_hash=AQDoIHiSAC6cMMwx', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDuGI7txbVqNakz&url=https%3A%2F%2Fwww.chicksonright.com%2Fopinion%2Fwp-content%2Fuploads%2Fsites%2F6%2F2019%2F09%2FScreen-Shot-2019-09-07-at-11.57.14-AM.png&_nc_hash=AQCiFQlBSx-BnMpT' + }], + 'statistics': { + 'actual': { + 'likeCount': 535, + 'shareCount': 62, + 'commentCount': 63, + 'loveCount': 58, + 'wowCount': 7, + 'hahaCount': 2, + 'sadCount': 0, + 'angryCount': 1, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 27, + 'shareCount': 19, + 'commentCount': 38, + 'loveCount': 3, + 'wowCount': 5, + 'hahaCount': 14, + 'sadCount': 4, + 'angryCount': 36, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 48734, + 'name': 'Young Conservatives', + 'handle': 'TheYoungCons', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/45427184_10155855954205841_8373169778709233664_n.jpg?_nc_cat=1&_nc_oc=AQmAgxZhqj9CXmiY228VRUATEHxELlR7p8BpNguYxOU_n6uxWw17ssXZSIF2mv2DreA&_nc_ht=scontent.xx&oh=ea10aeb60d4d31efb95e2c0a9f7ee098&oe=5DFE69A3', + 'subscriberCount': 1000057, + 'url': 'https://www.facebook.com/147772245840', + 'platform': 'Facebook', + 'platformId': '147772245840', + 'verified': False + } + }, { + 'id': 70168355148, + 'platformId': '10643211755_10158355674951756', + 'platform': 'Facebook', + 'date': '2019-09-07 22:32:03', + 'updated': '2019-09-08 00:23:24', + 'type': 'link', + 'title': "Opinion: Earth Has Survived Extinctions Before, It's Humans Who Are Fragile", + 'caption': 'npr.org', + 'description': "Earth has experienced cataclysmic life-destroying events before. 
NPR's Scott Simon reflects on what this means for humans in the midst of climate change.", + 'message': "Opinion: Earth has endured cataclysmic life-destroying events before. NPR's Scott Simon says it's us –– all the living things that inhabit it for a while –– who are fragile.", + 'expandedLinks': [{ + 'original': 'https://trib.al/gZNYTAo', + 'expanded': 'https://www.npr.org/2019/09/07/758448991/opinion-earth-has-survived-extinctions-before-its-humans-who-are-fragile?utm_source=facebook.com&utm_medium=social&utm_term=nprnews&utm_campaign=npr' + }], + 'link': 'https://trib.al/gZNYTAo', + 'postUrl': 'https://www.facebook.com/NPR/posts/10158355674951756', + 'subscriberCount': 6596234, + 'score': 4.9751724137931035, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBmP5_vhoAZ-PzY&w=720&h=720&url=https%3A%2F%2Fmedia.npr.org%2Fassets%2Fimg%2F2019%2F09%2F06%2Fgettyimages-1163083905_wide-aeecc15a41bef8f3ab6960ecdd682dd88366ce2b.jpg%3Fs%3D1400&cfs=1&_nc_hash=AQCSAlFPZxwX1a8K', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBfeWK671yKDWXM&url=https%3A%2F%2Fmedia.npr.org%2Fassets%2Fimg%2F2019%2F09%2F06%2Fgettyimages-1163083905_wide-aeecc15a41bef8f3ab6960ecdd682dd88366ce2b.jpg%3Fs%3D1400&_nc_hash=AQAwqEqQcFc87LZi' + }], + 'statistics': { + 'actual': { + 'likeCount': 2322, + 'shareCount': 727, + 'commentCount': 297, + 'loveCount': 108, + 'wowCount': 31, + 'hahaCount': 46, + 'sadCount': 72, + 'angryCount': 4, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 260, + 'shareCount': 161, + 'commentCount': 100, + 'loveCount': 15, + 'wowCount': 48, + 'hahaCount': 14, + 'sadCount': 79, + 'angryCount': 48, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 6149, + 'name': 'NPR', + 'handle': 'NPR', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/392453_10150756268711756_1078337478_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQkCimbrOrcgFhsAxAA1U5koNLGX9OLyOXdvEKxfRI0_6KYiFljw87Kls85nrj6clWA&_nc_ht=scontent.xx&oh=1883b0436c2dd854062b47c02250e87b&oe=5DF7D154', + 'subscriberCount': 6596236, + 'url': 'https://www.facebook.com/10643211755', + 'platform': 'Facebook', + 'platformId': '10643211755', + 'verified': True + } + }, { + 'id': 70169812970, + 'platformId': '9258148868_10156759046683869', + 'platform': 'Facebook', + 'date': '2019-09-07 22:45:06', + 'updated': '2019-09-08 00:45:29', + 'type': 'link', + 'title': 'William Steig’s Books Explored the Reality That Adults Don’t Want Children to Know About', + 'caption': 'newyorker.com', + 'description': 'Steig has a gift for stories that feel like fables or folktales, didactic forms that require a kind of frankness.', + 'message': 'Only a select handful of geniuses can manage to amuse both the kid being read to and the adult doing the reading. 
William Steig is one.', + 'expandedLinks': [{ + 'original': 'http://nyer.cm/exs5OOG', + 'expanded': 'https://www.newyorker.com/books/page-turner/william-steigs-books-explored-the-reality-that-adults-dont-want-children-to-know-about?utm_social-type=owned&utm_medium=social&utm_source=facebook&mbid=social_facebook&utm_brand=tny' + }], + 'link': 'http://nyer.cm/exs5OOG', + 'postUrl': 'https://www.facebook.com/newyorker/posts/10156759046683869', + 'subscriberCount': 4287168, + 'score': 4.913978494623656, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQC-HRIqGdFahnqE&w=720&h=720&url=https%3A%2F%2Fmedia.newyorker.com%2Fphotos%2F5cf818f247f7cc3a165070ad%2F16%3A9%2Fw_1200%2Ch_630%2Cc_limit%2FKidLit-SteigAmosandBoris-Social.jpg&cfs=1&_nc_hash=AQB-4qDEgnPqpwyJ', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQC6TrF4FdlN1wga&url=https%3A%2F%2Fmedia.newyorker.com%2Fphotos%2F5cf818f247f7cc3a165070ad%2F16%3A9%2Fw_1200%2Ch_630%2Cc_limit%2FKidLit-SteigAmosandBoris-Social.jpg&_nc_hash=AQDfMTbJzUyDjhsx' + }], + 'statistics': { + 'actual': { + 'likeCount': 264, + 'shareCount': 98, + 'commentCount': 19, + 'loveCount': 71, + 'wowCount': 2, + 'hahaCount': 3, + 'sadCount': 0, + 'angryCount': 0, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 33, + 'shareCount': 13, + 'commentCount': 9, + 'loveCount': 6, + 'wowCount': 4, + 'hahaCount': 7, + 'sadCount': 8, + 'angryCount': 13, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 10284, + 'name': 'The New Yorker', + 'handle': 'newyorker', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/1174822_10151878824588869_2070117374_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQno9Opk1N_2uuxM9xMCbaLh-8w7vk3rWYzY5iX2B0axGmTGyU1kkZY1RTndOiqUuAE&_nc_ht=scontent.xx&oh=e4a5a2194344ddb52a1e83254332bea3&oe=5DC7CED7', + 'subscriberCount': 4287325, + 'url': 'https://www.facebook.com/9258148868', + 'platform': 'Facebook', + 'platformId': '9258148868', + 'verified': True + } + }, { + 'id': 70171946856, + 'platformId': '182919686769_10156515246296770', + 'platform': 'Facebook', + 'date': '2019-09-07 23:30:09', + 'updated': '2019-09-08 00:40:44', + 'type': 'link', + 'title': 'Judge Rules In Favor Of CAIR, Says Terrorism Watchlist Violates Constitutional Rights', + 'caption': 'dailycaller.com', + 'description': "'Muslim registry'", + 'message': 'Wow.', + 'expandedLinks': [{ + 'original': 'https://dailycaller.com/2019/09/05/cair-watchlist-lawsuit-court-islam/', + 'expanded': 'https://dailycaller.com/2019/09/05/cair-watchlist-lawsuit-court-islam/' + }], + 'link': 'https://dailycaller.com/2019/09/05/cair-watchlist-lawsuit-court-islam/', + 'postUrl': 'https://www.facebook.com/DailyCaller/posts/10156515246296770', + 'subscriberCount': 5408428, + 'score': 4.911174785100287, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCzsDerqgeCLSlN&w=720&h=720&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f149ca9a410b2d40e794%2Fdbf3f811d3755e394f3369a84be4edc1ad58ca87_07dc201ff65866ba4d8a98a258bd2c75d6fadb3c_facebook&cfs=1&_nc_hash=AQC2WjhtmJYH0nh0', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBOEhLoSnrmguB7&url=https%3A%2F%2Fbuffer-media-uploads.s3.amazonaws.com%2F5d73f149ca9a410b2d40e794%2Fdbf3f811d3755e394f3369a84be4edc1ad58ca87_07dc201ff65866ba4d8a98a258bd2c75d6fadb3c_facebook&_nc_hash=AQC4PH3l5geEUt5p' + }], + 'statistics': { + 'actual': { + 'likeCount': 42, + 'shareCount': 303, + 
'commentCount': 486, + 'loveCount': 0, + 'wowCount': 60, + 'hahaCount': 16, + 'sadCount': 25, + 'angryCount': 782, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 96, + 'shareCount': 47, + 'commentCount': 96, + 'loveCount': 8, + 'wowCount': 13, + 'hahaCount': 47, + 'sadCount': 6, + 'angryCount': 36, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13489, + 'name': 'The Daily Caller', + 'handle': 'DailyCaller', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/64424339_10156312814376770_465273119980912640_n.jpg?_nc_cat=1&_nc_oc=AQlHxNdXLPL0FRqcFH4XQeF2ZiciX5Ic44Qiv8lMVhD0omNcCl0urQzRDQkX_p83-HY&_nc_ht=scontent.xx&oh=4ffb2baf1a5bcbc577c7a9494b1bb16a&oe=5E0B1471', + 'subscriberCount': 5408115, + 'url': 'https://www.facebook.com/182919686769', + 'platform': 'Facebook', + 'platformId': '182919686769', + 'verified': True + } + }, { + 'id': 70160023793, + 'platformId': '86680728811_10158783301598812', + 'platform': 'Facebook', + 'date': '2019-09-07 19:47:06', + 'updated': '2019-09-08 00:45:25', + 'type': 'link', + 'title': 'To help Trump, GOP considering canceling at least 3 primaries and caucuses', + 'caption': 'abcnews.go.com', + 'description': ' ', + 'message': "At least three states are considering plans to cancel their 2020 GOP presidential primaries and caucuses, prompting both of President Donald J. Trump's long-shot primary opponents to rail against the stunning, but not unprecedented move that would virtually cripple their chances.", + 'expandedLinks': [{ + 'original': 'https://abcn.ws/2Luw50G', + 'expanded': 'https://abcnews.go.com/Politics/trump-gop-canceling-gop-primaries-caucuses/story?id=65436462&cid=social_fb_abcn' + }], + 'link': 'https://abcn.ws/2Luw50G', + 'postUrl': 'https://www.facebook.com/ABCNews/posts/10158783301598812', + 'subscriberCount': 14195962, + 'score': 4.905707196029777, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?d=AQBPkzmGXGnYi4PV&w=558&h=558&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FPolitics%2Ftrump-gop-01-as-190906_hpMain_16x9_992.jpg&cfs=1&sx=434&sy=0&sw=558&sh=558&_nc_hash=AQCKoUX8TXoRMeqA', + 'height': 558, + 'width': 558, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQDGWHKJw8aw1GU2&url=https%3A%2F%2Fs.abcnews.com%2Fimages%2FPolitics%2Ftrump-gop-01-as-190906_hpMain_16x9_992.jpg&_nc_hash=AQBfcfAqZ3cLCQDZ' + }], + 'statistics': { + 'actual': { + 'likeCount': 229, + 'shareCount': 281, + 'commentCount': 447, + 'loveCount': 26, + 'wowCount': 81, + 'hahaCount': 141, + 'sadCount': 36, + 'angryCount': 736, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 171, + 'shareCount': 81, + 'commentCount': 65, + 'loveCount': 20, + 'wowCount': 21, + 'hahaCount': 17, + 'sadCount': 17, + 'angryCount': 11, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 13878, + 'name': 'ABC News', + 'handle': 'ABCNews', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/49603531_10158020022298812_7115988832050216960_n.jpg?_nc_cat=1&_nc_log=1&_nc_oc=AQn2Ghv2vLps15SQcVrGtTiEDJ-b5vJM4eJjywLNyGEaoQxoQo4B8vgY0GCUBSkfQqU&_nc_ht=scontent.xx&oh=cac6339a847fd884c058cd8e762c4052&oe=5DFD2D02', + 'subscriberCount': 14196629, + 'url': 'https://www.facebook.com/86680728811', + 'platform': 'Facebook', + 'platformId': '86680728811', + 'verified': True + } + }, { + 'id': 70159920653, + 'platformId': '83865976093_10156707520461094', + 'platform': 'Facebook', + 'date': '2019-09-07 19:50:00', + 'updated': '2019-09-08 00:42:30', + 'type': 'link', + 'title': 'US Beekeepers File Suit Against EPA 
Charging "Illegal" Approval of Insecticide', + 'caption': 'truthout.org', + 'description': 'Scientists have warned that sulfoxaflor is part of the massive pollinator die-off across the U.S.', + 'message': "The lawsuit charges that the EPA's approval of sulfoxaflor was illegally rendered as it put industry interests ahead of the health of pollinators and ignored the available science.", + 'expandedLinks': [{ + 'original': 'https://truthout.org/articles/us-beekeepers-file-suit-against-epa-charging-illegalapproval-of-insecticide/', + 'expanded': 'https://truthout.org/articles/us-beekeepers-file-suit-against-epa-charging-illegalapproval-of-insecticide/' + }], + 'link': 'https://truthout.org/articles/us-beekeepers-file-suit-against-epa-charging-illegalapproval-of-insecticide/', + 'postUrl': 'https://www.facebook.com/truthout/posts/10156707520461094', + 'subscriberCount': 754215, + 'score': 4.839160839160839, + 'media': [{ + 'type': 'photo', + 'url': 'https://external.xx.fbcdn.net/safe_image.php?w=720&h=720&url=https%3A%2F%2Ftruthout.org%2Fwp-content%2Fuploads%2F2019%2F09%2F19476616_0bfa08ace9_o.jpg&cfs=1&_nc_hash=AQBpmlkNTDdVJqxR', + 'height': 720, + 'width': 720, + 'full': 'https://external.xx.fbcdn.net/safe_image.php?d=AQCzPuIuejiQOYBx&url=https%3A%2F%2Ftruthout.org%2Fwp-content%2Fuploads%2F2019%2F09%2F19476616_0bfa08ace9_o.jpg&_nc_hash=AQCwh3_YUN6BH0z-' + }], + 'statistics': { + 'actual': { + 'likeCount': 389, + 'shareCount': 212, + 'commentCount': 10, + 'loveCount': 62, + 'wowCount': 9, + 'hahaCount': 0, + 'sadCount': 5, + 'angryCount': 5, + 'thankfulCount': 0 + }, + 'expected': { + 'likeCount': 30, + 'shareCount': 42, + 'commentCount': 11, + 'loveCount': 5, + 'wowCount': 6, + 'hahaCount': 3, + 'sadCount': 11, + 'angryCount': 35, + 'thankfulCount': 0 + } + }, + 'account': { + 'id': 4007, + 'name': 'Truthout', + 'handle': 'truthout', + 'profileImage': 'https://scontent.xx.fbcdn.net/v/t1.0-1/p200x200/19894613_10154795655481094_2383393652303893841_n.jpg?_nc_cat=110&_nc_oc=AQkTbiRpAD3hZBOdyzgT1PgAhh4VQwvgi7_UrWwWRSAE_kE9X6Vo3lxn9jEYjOQ71yY&_nc_ht=scontent.xx&oh=a8046506973cb0fdb4deab6119ed03f5&oe=5DF988DD', + 'subscriberCount': 754194, + 'url': 'https://www.facebook.com/83865976093', + 'platform': 'Facebook', + 'platformId': '83865976093', + 'verified': True + } + }], + 'pagination': { + } + } } diff --git a/test/test_crowdtangle/test_crowdtangle.py b/test/test_crowdtangle/test_crowdtangle.py index da12ebba81..9adb06637b 100644 --- a/test/test_crowdtangle/test_crowdtangle.py +++ b/test/test_crowdtangle/test_crowdtangle.py @@ -4,12 +4,14 @@ from test.test_crowdtangle.post import expected_posts from test.test_crowdtangle.leaderboard import expected_leaderboard from test.test_crowdtangle.link_post import expected_post -from parsons import CrowdTangle, Table +from parsons.crowdtangle import CrowdTangle +from parsons.etl import Table -CT_API_KEY = "FAKE_KEY" +CT_API_KEY = 'FAKE_KEY' class TestCrowdTangle(unittest.TestCase): + def setUp(self): self.ct = CrowdTangle(CT_API_KEY) @@ -17,25 +19,23 @@ def setUp(self): @requests_mock.Mocker() def test_get_posts(self, m): - m.get(self.ct.uri + "/posts", json=expected_posts) + m.get(self.ct.uri + '/posts', json=expected_posts) posts = self.ct.get_posts() - exp_tbl = self.ct._unpack(Table(expected_posts["result"]["posts"])) + exp_tbl = self.ct._unpack(Table(expected_posts['result']['posts'])) assert_matching_tables(posts, exp_tbl) @requests_mock.Mocker() def test_get_leaderboard(self, m): - m.get(self.ct.uri + "/leaderboard", json=expected_leaderboard) + 
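# A note on the fixture data above: each post's 'score' appears to be its total
# actual engagement divided by its total expected engagement. For the THE WEEK
# post, (136 + 81 + 42 + 0 + 37 + 8 + 4 + 72 + 0) / (18 + 9 + 13 + 6 + 5 + 10 + 4 + 7 + 0)
# = 380 / 72 = 5.2777..., matching its 'score' of 5.277777777777778. A minimal
# sketch of that ratio, assuming this reading of the data (the helper name is
# hypothetical, not part of the CrowdTangle connector):
def _overperformance_score(statistics):
    """Ratio of summed actual interaction counts to summed expected counts."""
    actual = sum(statistics['actual'].values())
    expected = sum(statistics['expected'].values())
    return actual / expected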
m.get(self.ct.uri + '/leaderboard', json=expected_leaderboard) leaderboard = self.ct.get_leaderboard() - exp_tbl = self.ct._unpack( - Table(expected_leaderboard["result"]["accountStatistics"]) - ) + exp_tbl = self.ct._unpack(Table(expected_leaderboard['result']['accountStatistics'])) assert_matching_tables(leaderboard, exp_tbl) @requests_mock.Mocker() def test_get_links(self, m): - m.get(self.ct.uri + "/links", json=expected_post) - post = self.ct.get_links(link="https://nbcnews.to/34stfC2") - exp_tbl = self.ct._unpack(Table(expected_post["result"]["posts"])) + m.get(self.ct.uri + '/links', json=expected_post) + post = self.ct.get_links(link='https://nbcnews.to/34stfC2') + exp_tbl = self.ct._unpack(Table(expected_post['result']['posts'])) assert_matching_tables(post, exp_tbl) diff --git a/test/test_databases/fakes.py b/test/test_databases/fakes.py index 049bba960b..1f87c924ae 100644 --- a/test/test_databases/fakes.py +++ b/test/test_databases/fakes.py @@ -11,38 +11,36 @@ def __init__(self): def setup_table(self, table_name, data, failures=0): self.table_map[table_name] = { - "failures": failures, - "table": FakeTable(table_name, data), + 'failures': failures, + 'table': FakeTable(table_name, data) } - return self.table_map[table_name]["table"] + return self.table_map[table_name]['table'] def table(self, table_name): if table_name not in self.table_map: self.setup_table(table_name, None) - return self.table_map[table_name]["table"] + return self.table_map[table_name]['table'] def copy(self, data, table_name, **kwargs): - logger.info("Copying %s rows", data.num_rows) + logger.info('Copying %s rows', data.num_rows) if table_name not in self.table_map: self.setup_table(table_name, Table()) - if self.table_map[table_name]["table"].data is None: - self.table_map[table_name]["table"].data = Table() + if self.table_map[table_name]['table'].data is None: + self.table_map[table_name]['table'].data = Table() - if self.table_map[table_name]["failures"] > 0: - self.table_map[table_name]["failures"] -= 1 - raise ValueError("Canned error") + if self.table_map[table_name]['failures'] > 0: + self.table_map[table_name]['failures'] -= 1 + raise ValueError('Canned error') - self.copy_call_args.append( - { - "data": data, - "table_name": table_name, - "kwargs": kwargs, - } - ) + self.copy_call_args.append({ + 'data': data, + 'table_name': table_name, + 'kwargs': kwargs, + }) - tbl = self.table_map[table_name]["table"] + tbl = self.table_map[table_name]['table'] tbl.data.concat(data) @@ -98,7 +96,7 @@ def get_rows(self, offset=0, chunk_size=None, order_by=None): if order_by: data.sort(order_by) - return Table(data[offset : chunk_size + offset]) + return Table(data[offset:chunk_size + offset]) def get_new_rows_count(self, primary_key_col, start_value=None): data = self.data.select_rows(lambda row: row[primary_key_col] > start_value) @@ -108,4 +106,4 @@ def get_new_rows(self, primary_key, cutoff_value, offset=0, chunk_size=None): data = self.data.select_rows(lambda row: row[primary_key] > cutoff_value) data.sort(primary_key) - return Table(data[offset : chunk_size + offset]) + return Table(data[offset:chunk_size + offset]) diff --git a/test/test_databases/test_database.py b/test/test_databases/test_database.py index b49491e687..93ac82fd49 100644 --- a/test/test_databases/test_database.py +++ b/test/test_databases/test_database.py @@ -1,12 +1,5 @@ from parsons.databases.database.constants import ( - SMALLINT, - MEDIUMINT, - INT, - BIGINT, - FLOAT, - BOOL, - VARCHAR, -) + SMALLINT, MEDIUMINT, INT, BIGINT, FLOAT, 
BOOL, VARCHAR) from parsons.databases.database.database import DatabaseCreateStatement @@ -28,150 +21,137 @@ def dcs_bool(): @pytest.mark.parametrize( ("int1", "int2", "higher"), - ( - (SMALLINT, SMALLINT, SMALLINT), - (SMALLINT, MEDIUMINT, MEDIUMINT), - (SMALLINT, INT, INT), - (SMALLINT, BIGINT, BIGINT), - (MEDIUMINT, SMALLINT, MEDIUMINT), - (MEDIUMINT, MEDIUMINT, MEDIUMINT), - (MEDIUMINT, INT, INT), - (MEDIUMINT, BIGINT, BIGINT), - (INT, SMALLINT, INT), - (INT, MEDIUMINT, INT), - (INT, INT, INT), - (INT, BIGINT, BIGINT), - (BIGINT, SMALLINT, BIGINT), - (BIGINT, MEDIUMINT, BIGINT), - (BIGINT, INT, BIGINT), - (BIGINT, BIGINT, BIGINT), - (None, BIGINT, BIGINT), - (INT, None, INT), - ), -) + ((SMALLINT, SMALLINT, SMALLINT), + (SMALLINT, MEDIUMINT, MEDIUMINT), + (SMALLINT, INT, INT), + (SMALLINT, BIGINT, BIGINT), + (MEDIUMINT, SMALLINT, MEDIUMINT), + (MEDIUMINT, MEDIUMINT, MEDIUMINT), + (MEDIUMINT, INT, INT), + (MEDIUMINT, BIGINT, BIGINT), + (INT, SMALLINT, INT), + (INT, MEDIUMINT, INT), + (INT, INT, INT), + (INT, BIGINT, BIGINT), + (BIGINT, SMALLINT, BIGINT), + (BIGINT, MEDIUMINT, BIGINT), + (BIGINT, INT, BIGINT), + (BIGINT, BIGINT, BIGINT), + (None, BIGINT, BIGINT), + (INT, None, INT), + )) def test_get_bigger_int(dcs, int1, int2, higher): assert dcs.get_bigger_int(int1, int2) == higher @pytest.mark.parametrize( ("val", "is_valid"), - ( - (10, True), - (1_0, True), - (+10, True), - (+1_0, True), - (1.2, True), - (1.0_0, True), - (1.0, True), - (1_0.0, True), - (+1.2, True), - (+1.0, True), - (+1.0_0, True), - (0, True), - (0.0, True), - ("10", True), - ("1_0", False), - ("+10", True), - ("+1_0", False), - ("1.2", True), - ("1.0_0", False), - ("1.", True), - ("1_0.", False), - ("+1.2", True), - ("+1.", True), - ("+1.0_0", False), - ("0", True), - ("0.0", True), - (True, False), - ("True", False), - ("a string", False), - ({}, False), - ([], False), - ([], False), - (None, False), - ), -) + ((10, True), + (1_0, True), + (+10, True), + (+1_0, True), + (1.2, True), + (1.0_0, True), + (1., True), + (1_0., True), + (+1.2, True), + (+1., True), + (+1.0_0, True), + (0, True), + (0.0, True), + ("10", True), + ("1_0", False), + ("+10", True), + ("+1_0", False), + ("1.2", True), + ("1.0_0", False), + ("1.", True), + ("1_0.", False), + ("+1.2", True), + ("+1.", True), + ("+1.0_0", False), + ("0", True), + ("0.0", True), + (True, False), + ("True", False), + ("a string", False), + ({}, False), + ([], False), + ([], False), + (None, False), + )) def test_is_valid_sql_num(dcs, val, is_valid): assert dcs.is_valid_sql_num(val) == is_valid @pytest.mark.parametrize( ("val", "cmp_type", "detected_type"), - ( - (1, None, SMALLINT), - (1, "", SMALLINT), - (1, MEDIUMINT, MEDIUMINT), - (32769, None, MEDIUMINT), - (32769, BIGINT, BIGINT), - (2147483648, None, BIGINT), - (2147483648, FLOAT, FLOAT), - (5.001, None, FLOAT), - (5.001, "", FLOAT), - ("FALSE", VARCHAR, VARCHAR), - ("word", "", VARCHAR), - ("word", INT, VARCHAR), - ("1_2", BIGINT, VARCHAR), - ("01", FLOAT, VARCHAR), - ("00001", None, VARCHAR), - ("word", None, VARCHAR), - ("1_2", None, VARCHAR), - ("01", None, VARCHAR), - ("{}", None, VARCHAR), - ), -) + ((1, None, SMALLINT), + (1, "", SMALLINT), + (1, MEDIUMINT, MEDIUMINT), + (32769, None, MEDIUMINT), + (32769, BIGINT, BIGINT), + (2147483648, None, BIGINT), + (2147483648, FLOAT, FLOAT), + (5.001, None, FLOAT), + (5.001, "", FLOAT), + ("FALSE", VARCHAR, VARCHAR), + ("word", "", VARCHAR), + ("word", INT, VARCHAR), + ("1_2", BIGINT, VARCHAR), + ("01", FLOAT, VARCHAR), + ("00001", None, VARCHAR), + ("word", 
None, VARCHAR), + ("1_2", None, VARCHAR), + ("01", None, VARCHAR), + ("{}", None, VARCHAR), + )) def test_detect_data_type(dcs, val, cmp_type, detected_type): assert dcs.detect_data_type(val, cmp_type) == detected_type @pytest.mark.parametrize( ("val", "cmp_type", "detected_type"), - ( - (2, None, SMALLINT), - (2, "", SMALLINT), - (1, MEDIUMINT, MEDIUMINT), - (2, BOOL, SMALLINT), - (True, None, BOOL), - (0, None, BOOL), - (1, None, BOOL), - (1, BOOL, BOOL), - ("F", None, BOOL), - ("FALSE", None, BOOL), - ("Yes", None, BOOL), - ), -) + ((2, None, SMALLINT), + (2, "", SMALLINT), + (1, MEDIUMINT, MEDIUMINT), + (2, BOOL, SMALLINT), + (True, None, BOOL), + (0, None, BOOL), + (1, None, BOOL), + (1, BOOL, BOOL), + ("F", None, BOOL), + ("FALSE", None, BOOL), + ("Yes", None, BOOL) + )) def test_detect_data_type_bools(dcs_bool, val, cmp_type, detected_type): assert dcs_bool.detect_data_type(val, cmp_type) == detected_type @pytest.mark.parametrize( ("col", "renamed"), - ( - ("a", "a"), - ("A", "a"), - ("", "_"), - ("SELECT", "select_"), - ("two words", "two_words"), - (" trailing space ", "trailing_space"), - ("1234567890", "x_1234567890"), - ("0word", "x_0word"), - # create a really long column name - # len("asdfghjkla" * 13) == 130 - # len("asdfghjkla" * 10) == 100 - ("asdfghjkla" * 13, "asdfghjkla" * 10), - ), -) + (("a", "a"), + ("A", "a"), + ("", "_"), + ("SELECT", "select_"), + ("two words", "two_words"), + (" trailing space ", "trailing_space"), + ("1234567890", "x_1234567890"), + ("0word", "x_0word"), + + # create a really long column name + # len("asdfghjkla" * 13) == 130 + # len("asdfghjkla" * 10) == 100 + ("asdfghjkla" * 13, "asdfghjkla" * 10), + )) def test_default_format_column(dcs, col, renamed): assert dcs.format_column(col) == renamed @pytest.mark.parametrize( ("cols", "cols_formatted"), - ( - ( - ["a", "A", "b", " b ", "col name", "col_name"], - ["a", "a_1", "b", "b_3", "col_name", "col_name_5"], - ), - ), -) + ((["a", "A", "b", " b ", "col name", "col_name"], + ["a", "a_1", "b", "b_3", "col_name", "col_name_5"]), + )) def test_default_format_columns(dcs, cols, cols_formatted): assert dcs.format_columns(cols) == cols_formatted diff --git a/test/test_databases/test_dbsync.py b/test/test_databases/test_dbsync.py index bcfc1c0781..f17ea37111 100644 --- a/test/test_databases/test_dbsync.py +++ b/test/test_databases/test_dbsync.py @@ -6,15 +6,14 @@ _dir = os.path.dirname(__file__) -TEMP_SCHEMA = "parsons_test" +TEMP_SCHEMA = 'parsons_test' # These tests interact directly with the Postgres database. In order to run, set the # env to LIVE_TEST='TRUE'. 
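# The decorator below relies on simple truthiness: os.environ.get('LIVE_TEST')
# returns None when the variable is unset and the raw string otherwise, so any
# non-empty value (even LIVE_TEST='FALSE') enables the live suite. A minimal
# sketch of the guard, with an illustrative invocation (the exact command is an
# example, not a documented Parsons workflow):
import os
run_live = bool(os.environ.get('LIVE_TEST'))  # unset -> False; any non-empty string -> True
# Run the live suite with e.g.:  LIVE_TEST=TRUE pytest test/test_databases/test_dbsync.py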
-@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestPostgresDBSync(unittest.TestCase): + def setUp(self): self.temp_schema = TEMP_SCHEMA @@ -28,11 +27,11 @@ def setUp(self): self.db.query(setup_sql) # Load dummy data to parsons tables - self.table1 = Table.from_csv(f"{_dir}/test_data/sample_table_1.csv") - self.table2 = Table.from_csv(f"{_dir}/test_data/sample_table_2.csv") + self.table1 = Table.from_csv(f'{_dir}/test_data/sample_table_1.csv') + self.table2 = Table.from_csv(f'{_dir}/test_data/sample_table_2.csv') # Create source table - self.db.copy(self.table1, f"{self.temp_schema}.source") + self.db.copy(self.table1, f'{self.temp_schema}.source') # Create DB Sync object self.db_sync = DBSync(self.db, self.db) @@ -48,9 +47,8 @@ def tearDown(self): def test_table_sync_full_drop(self): # Test a db sync with drop. - self.db_sync.table_sync_full( - f"{self.temp_schema}.source", f"{self.temp_schema}.destination" - ) + self.db_sync.table_sync_full(f'{self.temp_schema}.source', + f'{self.temp_schema}.destination') source = self.db.query(f"SELECT * FROM {self.temp_schema}.source") destination = self.db.query(f"SELECT * FROM {self.temp_schema}.destination") @@ -60,10 +58,7 @@ def test_table_sync_full_truncate(self): # Test a db sync with truncate. self.db_sync.table_sync_full( - f"{self.temp_schema}.source", - f"{self.temp_schema}.destination", - if_exists="truncate", - ) + f'{self.temp_schema}.source', f'{self.temp_schema}.destination', if_exists='truncate') source = self.db.query(f"SELECT * FROM {self.temp_schema}.source") destination = self.db.query(f"SELECT * FROM {self.temp_schema}.destination") assert_matching_tables(source, destination) @@ -76,16 +71,14 @@ def test_table_sync_full_empty_table(self): # Attempt to sync. self.db_sync.table_sync_full( - f"{self.temp_schema}.source", f"{self.temp_schema}.destination" - ) + f'{self.temp_schema}.source', f'{self.temp_schema}.destination') def test_table_sync_full_chunk(self): # Test chunking in full sync. self.db_sync.chunk_size = 10 - self.db_sync.table_sync_full( - f"{self.temp_schema}.source", f"{self.temp_schema}.destination" - ) + self.db_sync.table_sync_full(f'{self.temp_schema}.source', + f'{self.temp_schema}.destination') source = self.db.query(f"SELECT * FROM {self.temp_schema}.source") destination = self.db.query(f"SELECT * FROM {self.temp_schema}.destination") @@ -94,11 +87,11 @@ def test_table_sync_full_chunk(self): def test_table_sync_incremental(self): # Test that incremental sync - self.db.copy(self.table1, f"{self.temp_schema}.destination") - self.db.copy(self.table2, f"{self.temp_schema}.source", if_exists="append") - self.db_sync.table_sync_incremental( - f"{self.temp_schema}.source", f"{self.temp_schema}.destination", "pk" - ) + self.db.copy(self.table1, f'{self.temp_schema}.destination') + self.db.copy(self.table2, f'{self.temp_schema}.source', if_exists='append') + self.db_sync.table_sync_incremental(f'{self.temp_schema}.source', + f'{self.temp_schema}.destination', + 'pk') count1 = self.db.query(f"SELECT * FROM {self.temp_schema}.source") count2 = self.db.query(f"SELECT * FROM {self.temp_schema}.destination") @@ -108,11 +101,11 @@ def test_table_sync_incremental_chunk(self): # Test chunking of incremental sync. 
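# The chunk arithmetic exercised by the fake-database tests further down: with
# 5 source rows, read_chunk_size=2 yields ceil(5/2) = 3 copy calls, and
# write_chunk_size=3 yields ceil(5/3) = 2. A minimal batching sketch under that
# assumption (illustrative only, not the DBSync implementation):
import math

def _chunked(rows, size):
    """Yield successive batches of at most `size` rows."""
    for i in range(0, len(rows), size):
        yield rows[i:i + size]

assert len(list(_chunked(range(5), 2))) == math.ceil(5 / 2)  # 3 batches -> 3 copies
assert len(list(_chunked(range(5), 3))) == math.ceil(5 / 3)  # 2 batches -> 2 copies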
self.db_sync.chunk_size = 10 - self.db.copy(self.table1, f"{self.temp_schema}.destination") - self.db.copy(self.table2, f"{self.temp_schema}.source", if_exists="append") - self.db_sync.table_sync_incremental( - f"{self.temp_schema}.source", f"{self.temp_schema}.destination", "pk" - ) + self.db.copy(self.table1, f'{self.temp_schema}.destination') + self.db.copy(self.table2, f'{self.temp_schema}.source', if_exists='append') + self.db_sync.table_sync_incremental(f'{self.temp_schema}.source', + f'{self.temp_schema}.destination', + 'pk') count1 = self.db.query(f"SELECT * FROM {self.temp_schema}.source") count2 = self.db.query(f"SELECT * FROM {self.temp_schema}.destination") @@ -121,9 +114,9 @@ def test_table_sync_incremental_chunk(self): def test_table_sync_incremental_create_destination_table(self): # Test that an incremental sync works if the destination table does not exist. - self.db_sync.table_sync_incremental( - f"{self.temp_schema}.source", f"{self.temp_schema}.destination", "pk" - ) + self.db_sync.table_sync_incremental(f'{self.temp_schema}.source', + f'{self.temp_schema}.destination', + 'pk') count1 = self.db.query(f"SELECT * FROM {self.temp_schema}.source") count2 = self.db.query(f"SELECT * FROM {self.temp_schema}.destination") @@ -136,18 +129,16 @@ def test_table_sync_incremental_empty_table(self): self.db.query(f"TRUNCATE {self.temp_schema}.source") # Attempt to sync. - self.db_sync.table_sync_incremental( - f"{self.temp_schema}.source", f"{self.temp_schema}.destination", "pk" - ) + self.db_sync.table_sync_incremental(f'{self.temp_schema}.source', + f'{self.temp_schema}.destination', + 'pk') # These tests interact directly with the Postgres database. In order to run, set the # env to LIVE_TEST='TRUE'. -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestRedshiftDBSync(TestPostgresDBSync): - """This test inherits all of the tests from the Postgres test.""" + """ This test inherits all of the tests from the Postgres test. 
""" def setUp(self): @@ -162,65 +153,60 @@ def setUp(self): self.db.query(setup_sql) # Load dummy data to parsons tables - self.table1 = Table.from_csv(f"{_dir}/test_data/sample_table_1.csv") - self.table2 = Table.from_csv(f"{_dir}/test_data/sample_table_2.csv") + self.table1 = Table.from_csv(f'{_dir}/test_data/sample_table_1.csv') + self.table2 = Table.from_csv(f'{_dir}/test_data/sample_table_2.csv') # Create source table - self.db.copy(self.table1, f"{self.temp_schema}.source") + self.db.copy(self.table1, f'{self.temp_schema}.source') # Create DB Sync object self.db_sync = DBSync(self.db, self.db) class TestFakeDBSync(unittest.TestCase): + def setUp(self): self.fake_source = FakeDatabase() self.fake_destination = FakeDatabase() def test_table_sync_full(self): dbsync = DBSync(self.fake_source, self.fake_destination) - source_data = Table( - [ - {"id": 1, "value": 11}, - {"id": 2, "value": 121142}, - {"id": 3, "value": 111}, - {"id": 4, "value": 12211}, - {"id": 5, "value": 1231}, - ] - ) - self.fake_source.setup_table("source", source_data) + source_data = Table([ + {'id': 1, 'value': 11}, + {'id': 2, 'value': 121142}, + {'id': 3, 'value': 111}, + {'id': 4, 'value': 12211}, + {'id': 5, 'value': 1231}, + ]) + self.fake_source.setup_table('source', source_data) - dbsync.table_sync_full("source", "destination") + dbsync.table_sync_full('source', 'destination') - destination = self.fake_destination.table("destination") + destination = self.fake_destination.table('destination') # Make sure the data came through assert_matching_tables(source_data, destination.data) def test_table_sync_incremental(self): dbsync = DBSync(self.fake_source, self.fake_destination) - source_data = Table( - [ - {"id": 1, "value": 11}, - {"id": 2, "value": 121142}, - {"id": 3, "value": 111}, - {"id": 4, "value": 12211}, - {"id": 5, "value": 1231}, - ] - ) - self.fake_source.setup_table("source", source_data) + source_data = Table([ + {'id': 1, 'value': 11}, + {'id': 2, 'value': 121142}, + {'id': 3, 'value': 111}, + {'id': 4, 'value': 12211}, + {'id': 5, 'value': 1231}, + ]) + self.fake_source.setup_table('source', source_data) # Start with one row - destination_data = Table( - [ - {"id": 1, "value": 11}, - ] - ) - self.fake_destination.setup_table("destination", destination_data) + destination_data = Table([ + {'id': 1, 'value': 11}, + ]) + self.fake_destination.setup_table('destination', destination_data) - dbsync.table_sync_incremental("source", "destination", "id") + dbsync.table_sync_incremental('source', 'destination', 'id') - destination = self.fake_destination.table("destination") + destination = self.fake_destination.table('destination') # Make sure the rest of the data came through assert_matching_tables(source_data, destination.data) @@ -228,20 +214,18 @@ def test_table_sync_incremental(self): def test_table_sync_full_with_retry(self): # Setup the dbsync with two retries dbsync = DBSync(self.fake_source, self.fake_destination, retries=2) - source_data = Table( - [ - {"id": 1, "value": 11}, - {"id": 2, "value": 121142}, - ] - ) - self.fake_source.setup_table("source", source_data) + source_data = Table([ + {'id': 1, 'value': 11}, + {'id': 2, 'value': 121142}, + ]) + self.fake_source.setup_table('source', source_data) # Have the copy fail twice - self.fake_destination.setup_table("destination", Table(), failures=2) + self.fake_destination.setup_table('destination', Table(), failures=2) - dbsync.table_sync_full("source", "destination") + dbsync.table_sync_full('source', 'destination') - destination = 
self.fake_destination.table("destination") + destination = self.fake_destination.table('destination') # Make sure all of the data still came through assert_matching_tables(source_data, destination.data) @@ -249,100 +233,80 @@ def test_table_sync_full_with_retry(self): def test_table_sync_full_without_retry(self): # Setup the dbsync with no retries dbsync = DBSync(self.fake_source, self.fake_destination, retries=0) - source_data = Table( - [ - {"id": 1, "value": 11}, - {"id": 2, "value": 121142}, - ] - ) - self.fake_source.setup_table("source", source_data) + source_data = Table([ + {'id': 1, 'value': 11}, + {'id': 2, 'value': 121142}, + ]) + self.fake_source.setup_table('source', source_data) # Have the copy fail once - self.fake_destination.setup_table("destination", Table(), failures=1) + self.fake_destination.setup_table('destination', Table(), failures=1) # Make sure the sync results in an exception - self.assertRaises( - ValueError, lambda: dbsync.table_sync_full("source", "destination") - ) + self.assertRaises(ValueError, lambda: dbsync.table_sync_full('source', 'destination')) def test_table_sync_full_order_by(self): dbsync = DBSync(self.fake_source, self.fake_destination) - source_data = Table( - [ - {"id": 1, "value": 21}, - {"id": 2, "value": 121142}, - {"id": 3, "value": 1}, - ] - ) - self.fake_source.setup_table("source", source_data) - self.fake_destination.setup_table("destination", Table()) + source_data = Table([ + {'id': 1, 'value': 21}, + {'id': 2, 'value': 121142}, + {'id': 3, 'value': 1}, + ]) + self.fake_source.setup_table('source', source_data) + self.fake_destination.setup_table('destination', Table()) - dbsync.table_sync_full("source", "destination", order_by="value") + dbsync.table_sync_full('source', 'destination', order_by='value') - destination = self.fake_destination.table("destination") + destination = self.fake_destination.table('destination') # Check that the rows were inserted in the expected order - self.assertEqual(destination.data[0]["id"], 3) - self.assertEqual(destination.data[1]["id"], 1) - self.assertEqual(destination.data[2]["id"], 2) + self.assertEqual(destination.data[0]['id'], 3) + self.assertEqual(destination.data[1]['id'], 1) + self.assertEqual(destination.data[2]['id'], 2) def test_table_sync_full_read_chunk(self): dbsync = DBSync(self.fake_source, self.fake_destination, read_chunk_size=2) - source_data = Table( - [ - {"id": 1, "value": 11}, - {"id": 2, "value": 121142}, - {"id": 3, "value": 111}, - {"id": 4, "value": 12211}, - {"id": 5, "value": 1231}, - ] - ) - self.fake_source.setup_table("source", source_data) + source_data = Table([ + {'id': 1, 'value': 11}, + {'id': 2, 'value': 121142}, + {'id': 3, 'value': 111}, + {'id': 4, 'value': 12211}, + {'id': 5, 'value': 1231}, + ]) + self.fake_source.setup_table('source', source_data) - dbsync.table_sync_full("source", "destination") + dbsync.table_sync_full('source', 'destination') - destination = self.fake_destination.table("destination") + destination = self.fake_destination.table('destination') # Make sure the data came through assert_matching_tables(source_data, destination.data) # Make sure copy was called the expected number of times # read chunks of 2, 5 rows to write.. 
should be 3 copy calls - self.assertEqual( - len(self.fake_destination.copy_call_args), - 3, - self.fake_destination.copy_call_args, - ) + self.assertEqual(len(self.fake_destination.copy_call_args), 3, + self.fake_destination.copy_call_args) def test_table_sync_full_write_chunk(self): - dbsync = DBSync( - self.fake_source, - self.fake_destination, - read_chunk_size=1, - write_chunk_size=3, - ) - source_data = Table( - [ - {"id": 1, "value": 11}, - {"id": 2, "value": 121142}, - {"id": 3, "value": 111}, - {"id": 4, "value": 12211}, - {"id": 5, "value": 1231}, - ] - ) - self.fake_source.setup_table("source", source_data) - - dbsync.table_sync_full("source", "destination") - - destination = self.fake_destination.table("destination") + dbsync = DBSync(self.fake_source, self.fake_destination, read_chunk_size=1, + write_chunk_size=3) + source_data = Table([ + {'id': 1, 'value': 11}, + {'id': 2, 'value': 121142}, + {'id': 3, 'value': 111}, + {'id': 4, 'value': 12211}, + {'id': 5, 'value': 1231}, + ]) + self.fake_source.setup_table('source', source_data) + + dbsync.table_sync_full('source', 'destination') + + destination = self.fake_destination.table('destination') # Make sure the data came through assert_matching_tables(source_data, destination.data) # Make sure copy was called the expected number of times # write chunks of 3, 5 rows to write.. should be 2 copy calls - self.assertEqual( - len(self.fake_destination.copy_call_args), - 2, - self.fake_destination.copy_call_args, - ) + self.assertEqual(len(self.fake_destination.copy_call_args), 2, + self.fake_destination.copy_call_args) diff --git a/test/test_databases/test_discover_database.py b/test/test_databases/test_discover_database.py deleted file mode 100644 index b946629e10..0000000000 --- a/test/test_databases/test_discover_database.py +++ /dev/null @@ -1,116 +0,0 @@ -import unittest -from unittest.mock import patch -from parsons.databases.redshift import Redshift -from parsons.databases.mysql import MySQL -from parsons.databases.postgres import Postgres -from parsons.google.google_bigquery import GoogleBigQuery -from parsons.databases.discover_database import discover_database - - -class TestDiscoverDatabase(unittest.TestCase): - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_no_database_detected(self, mock_getenv, *_): - mock_getenv.return_value = None - with self.assertRaises(EnvironmentError): - discover_database() - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_single_database_detected(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" if var == "REDSHIFT_PASSWORD" else None - ) - self.assertIsInstance(discover_database(), Redshift) - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_single_database_detected_with_other_default(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" if var == "REDSHIFT_PASSWORD" else None - ) - 
self.assertIsInstance(discover_database(default_connector=Postgres), Redshift) - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_single_database_detected_with_other_default_list(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" if var == "REDSHIFT_PASSWORD" else None - ) - self.assertIsInstance( - discover_database(default_connector=[Postgres, MySQL]), Redshift - ) - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_multiple_databases_no_default(self, mock_getenv, *_): - mock_getenv.return_value = "password" - with self.assertRaises(EnvironmentError): - discover_database() - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_multiple_databases_with_default(self, mock_getenv, *_): - mock_getenv.return_value = "password" - self.assertIsInstance(discover_database(default_connector=Redshift), Redshift) - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_multiple_databases_with_default_list(self, mock_getenv, *_): - mock_getenv.return_value = "password" - self.assertIsInstance( - discover_database(default_connector=[MySQL, Redshift]), MySQL - ) - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_multiple_databases_invalid_default(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" - if var == "REDSHIFT_PASSWORD" or var == "MYSQL_PASSWORD" - else None - ) - with self.assertRaises(EnvironmentError): - discover_database(default_connector=Postgres) - - @patch.object(GoogleBigQuery, "__init__", return_value=None) - @patch.object(Postgres, "__init__", return_value=None) - @patch.object(MySQL, "__init__", return_value=None) - @patch.object(Redshift, "__init__", return_value=None) - @patch("os.getenv") - def test_multiple_databases_invalid_default_list(self, mock_getenv, *_): - mock_getenv.side_effect = ( - lambda var: "password" - if var == "REDSHIFT_PASSWORD" or var == "MYSQL_PASSWORD" - else None - ) - with self.assertRaises(EnvironmentError): - discover_database(default_connector=[Postgres, GoogleBigQuery]) - - -if __name__ == "__main__": - unittest.main() diff --git a/test/test_databases/test_mysql.py b/test/test_databases/test_mysql.py index 01c156dfe4..361a3c229e 100644 --- a/test/test_databases/test_mysql.py +++ b/test/test_databases/test_mysql.py @@ -1,15 +1,15 @@ -from parsons import MySQL, Table +from parsons.databases.mysql.mysql import MySQL from parsons.databases.mysql.create_table import MySQLCreateTable +from parsons.etl.table import Table from test.utils import 
assert_matching_tables import unittest import os # These tests interact directly with the MySQL database. To run, set env variable "LIVE_TEST=True" -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestMySQLLive(unittest.TestCase): + def setUp(self): self.mysql = MySQL() @@ -43,32 +43,28 @@ def test_insert_data(self): r = self.mysql.query("select * from test") - assert_matching_tables(Table([{"name": "me", "user_name": "myuser"}]), r) + assert_matching_tables(Table([{'name': 'me', 'user_name': 'myuser'}]), r) # These tests interact directly with the MySQL database. To run, set env variable "LIVE_TEST=True" -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestMySQL(unittest.TestCase): + def setUp(self): self.mysql = MySQL() # Create tables self.mysql.query( - "CREATE TABLE IF NOT EXISTS test (name VARCHAR(255), user_name VARCHAR(255), id INT)" - ) - self.mysql.query( - """ + "CREATE TABLE IF NOT EXISTS test (name VARCHAR(255), user_name VARCHAR(255), id INT)") + self.mysql.query(""" INSERT INTO test (name, user_name, id) VALUES ('me', 'myuser', '1'), ('you', 'hey', '2'), ('you', 'hey', '3') - """ - ) + """) - self.tbl = MySQLCreateTable(self.mysql, "test") + self.tbl = MySQLCreateTable(self.mysql, 'test') def tearDown(self): @@ -80,22 +76,22 @@ def test_num_rows(self): def test_max_primary_key(self): - self.assertEqual(self.tbl.max_primary_key("id"), 3) + self.assertEqual(self.tbl.max_primary_key('id'), 3) def test_distinct_primary_key(self): - self.assertTrue(self.tbl.distinct_primary_key("id")) - self.assertFalse(self.tbl.distinct_primary_key("user_name")) + self.assertTrue(self.tbl.distinct_primary_key('id')) + self.assertFalse(self.tbl.distinct_primary_key('user_name')) def test_columns(self): - self.assertEqual(self.tbl.columns, ["name", "user_name", "id"]) + self.assertEqual(self.tbl.columns, ['name', 'user_name', 'id']) def test_exists(self): self.assertTrue(self.tbl.exists) - tbl_bad = MySQLCreateTable(self.mysql, "bad_test") + tbl_bad = MySQLCreateTable(self.mysql, 'bad_test') self.assertFalse(tbl_bad.exists) def test_drop(self): @@ -110,87 +106,79 @@ def test_truncate(self): def test_get_rows(self): - data = [ - ["name", "user_name", "id"], - ["me", "myuser", "1"], - ["you", "hey", "2"], - ["you", "hey", "3"], - ] + data = [['name', 'user_name', 'id'], + ['me', 'myuser', '1'], + ['you', 'hey', '2'], + ['you', 'hey', '3']] tbl = Table(data) assert_matching_tables(self.tbl.get_rows(), tbl) def test_get_new_rows(self): - data = [["name", "user_name", "id"], ["you", "hey", "2"], ["you", "hey", "3"]] + data = [['name', 'user_name', 'id'], + ['you', 'hey', '2'], + ['you', 'hey', '3']] tbl = Table(data) # Basic - assert_matching_tables(self.tbl.get_new_rows("id", 1), tbl) + assert_matching_tables(self.tbl.get_new_rows('id', 1), tbl) # Chunking - assert_matching_tables(self.tbl.get_new_rows("id", 1, chunk_size=1), tbl) + assert_matching_tables(self.tbl.get_new_rows('id', 1, chunk_size=1), tbl) def test_get_new_rows_count(self): - self.assertEqual(self.tbl.get_new_rows_count("id", 1), 2) - + self.assertEqual(self.tbl.get_new_rows_count('id', 1), 2) # TODO: figure out why there are 2 of these class TestMySQL(unittest.TestCase): # noqa + def setUp(self): - self.mysql = MySQL( - username="test", 
password="test", host="test", db="test", port=123 - ) + self.mysql = MySQL(username='test', password='test', host='test', db='test', port=123) - self.tbl = Table( - [ - ["ID", "Name", "Score"], - [1, "Jim", 1.9], - [2, "John", -0.5], - [3, "Sarah", 0.0004], - ] - ) + self.tbl = Table([['ID', 'Name', 'Score'], + [1, 'Jim', 1.9], + [2, 'John', -0.5], + [3, 'Sarah', .0004]]) def test_data_type(self): # Test bool self.mysql.DO_PARSE_BOOLS = True - self.assertEqual(self.mysql.data_type(1, ""), "bool") - self.assertEqual(self.mysql.data_type(False, ""), "bool") + self.assertEqual(self.mysql.data_type(1, ''), 'bool') + self.assertEqual(self.mysql.data_type(False, ''), 'bool') self.mysql.DO_PARSE_BOOLS = False # Test smallint - self.assertEqual(self.mysql.data_type(1, ""), "smallint") - self.assertEqual(self.mysql.data_type(2, ""), "smallint") + self.assertEqual(self.mysql.data_type(1, ''), 'smallint') + self.assertEqual(self.mysql.data_type(2, ''), 'smallint') # Test int - self.assertEqual(self.mysql.data_type(32769, ""), "mediumint") + self.assertEqual(self.mysql.data_type(32769, ''), 'mediumint') # Test bigint - self.assertEqual(self.mysql.data_type(2147483648, ""), "bigint") + self.assertEqual(self.mysql.data_type(2147483648, ''), 'bigint') # Test varchar that looks like an int - self.assertEqual(self.mysql.data_type("00001", ""), "varchar") + self.assertEqual(self.mysql.data_type('00001', ''), 'varchar') # Test varchar that looks like a bool - self.assertEqual(self.mysql.data_type(False, ""), "varchar") + self.assertEqual(self.mysql.data_type(False, ''), 'varchar') # Test a float as a decimal - self.assertEqual(self.mysql.data_type(5.001, ""), "float") + self.assertEqual(self.mysql.data_type(5.001, ''), 'float') # Test varchar - self.assertEqual(self.mysql.data_type("word", ""), "varchar") + self.assertEqual(self.mysql.data_type('word', ''), 'varchar') # Test int with underscore - self.assertEqual(self.mysql.data_type("1_2", ""), "varchar") + self.assertEqual(self.mysql.data_type('1_2', ''), 'varchar') # Test int with leading zero - self.assertEqual(self.mysql.data_type("01", ""), "varchar") + self.assertEqual(self.mysql.data_type('01', ''), 'varchar') def test_evaluate_table(self): - table_map = [ - {"name": "ID", "type": "smallint", "width": 0}, - {"name": "Name", "type": "varchar", "width": 8}, - {"name": "Score", "type": "float", "width": 0}, - ] + table_map = [{'name': 'ID', 'type': 'smallint', 'width': 0}, + {'name': 'Name', 'type': 'varchar', 'width': 8}, + {'name': 'Score', 'type': 'float', 'width': 0}] self.assertEqual(self.mysql.evaluate_table(self.tbl), table_map) def test_create_statement(self): stmt = "CREATE TABLE test_table ( \n id smallint \n,name varchar(10) \n,score float \n);" - self.assertEqual(self.mysql.create_statement(self.tbl, "test_table"), stmt) + self.assertEqual(self.mysql.create_statement(self.tbl, 'test_table'), stmt) diff --git a/test/test_databases/test_postgres.py b/test/test_databases/test_postgres.py index 08956c672d..44cf60d0fb 100644 --- a/test/test_databases/test_postgres.py +++ b/test/test_databases/test_postgres.py @@ -1,35 +1,36 @@ -from parsons import Postgres, Table +from parsons.databases.postgres import Postgres +from parsons.etl.table import Table from test.utils import assert_matching_tables import unittest import os # The name of the schema and will be temporarily created for the tests -TEMP_SCHEMA = "parsons_test" +TEMP_SCHEMA = 'parsons_test' # These tests do not interact with the Postgres Database directly, and don't need real credentials 
class TestPostgresCreateStatement(unittest.TestCase): + def setUp(self): - self.pg = Postgres( - username="test", password="test", host="test", db="test", port=123 - ) - - self.tbl = Table([["ID", "Name"], [1, "Jim"], [2, "John"], [3, "Sarah"]]) - - self.tbl2 = Table( - [ - ["c1", "c2", "c3", "c4", "c5", "c6", "c7"], - ["a", "", 1, "NA", 1.4, 1, 2], - ["b", "", 2, "NA", 1.4, 1, 2], - ["c", "", 3.4, "NA", "", "", "a"], - ["d", "", 5, "NA", 1.4, 1, 2], - ["e", "", 6, "NA", 1.4, 1, 2], - ["f", "", 7.8, "NA", 1.4, 1, 2], - ["g", "", 9, "NA", 1.4, 1, 2], - ] - ) + self.pg = Postgres(username='test', password='test', host='test', db='test', port=123) + + self.tbl = Table([['ID', 'Name'], + [1, 'Jim'], + [2, 'John'], + [3, 'Sarah']]) + + self.tbl2 = Table([ + ["c1", "c2", "c3", "c4", "c5", "c6", "c7"], + ["a", "", 1, "NA", 1.4, 1, 2], + ["b", "", 2, "NA", 1.4, 1, 2], + ["c", "", 3.4, "NA", "", "", "a"], + ["d", "", 5, "NA", 1.4, 1, 2], + ["e", "", 6, "NA", 1.4, 1, 2], + ["f", "", 7.8, "NA", 1.4, 1, 2], + ["g", "", 9, "NA", 1.4, 1, 2], + ]) self.mapping = self.pg.generate_data_types(self.tbl) self.mapping2 = self.pg.generate_data_types(self.tbl2) @@ -39,83 +40,73 @@ def setUp(self): def test_connection(self): # Test connection with kwargs passed - Postgres(username="test", password="test", host="test", db="test") + Postgres(username='test', password='test', host='test', db='test') # Test connection with env variables - os.environ["PGUSER"] = "user_env" - os.environ["PGPASSWORD"] = "pass_env" - os.environ["PGHOST"] = "host_env" - os.environ["PGDATABASE"] = "db_env" - os.environ["PGPORT"] = "5432" + os.environ['PGUSER'] = 'user_env' + os.environ['PGPASSWORD'] = 'pass_env' + os.environ['PGHOST'] = 'host_env' + os.environ['PGDATABASE'] = 'db_env' + os.environ['PGPORT'] = '5432' pg_env = Postgres() - self.assertEqual(pg_env.username, "user_env") - self.assertEqual(pg_env.password, "pass_env") - self.assertEqual(pg_env.host, "host_env") - self.assertEqual(pg_env.db, "db_env") + self.assertEqual(pg_env.username, 'user_env') + self.assertEqual(pg_env.password, 'pass_env') + self.assertEqual(pg_env.host, 'host_env') + self.assertEqual(pg_env.db, 'db_env') self.assertEqual(pg_env.port, 5432) def test_data_type(self): self.pg.DO_PARSE_BOOLS = False # Test smallint - self.assertEqual(self.pg.data_type(1, ""), "smallint") - self.assertEqual(self.pg.data_type(2, ""), "smallint") + self.assertEqual(self.pg.data_type(1, ''), 'smallint') + self.assertEqual(self.pg.data_type(2, ''), 'smallint') # Test int - self.assertEqual(self.pg.data_type(32769, ""), "int") + self.assertEqual(self.pg.data_type(32769, ''), 'int') # Test bigint - self.assertEqual(self.pg.data_type(2147483648, ""), "bigint") + self.assertEqual(self.pg.data_type(2147483648, ''), 'bigint') # Test varchar that looks like an int - self.assertEqual(self.pg.data_type("00001", ""), "varchar") + self.assertEqual(self.pg.data_type('00001', ''), 'varchar') # Test varchar that looks like a bool - self.assertEqual(self.pg.data_type(True, ""), "varchar") + self.assertEqual(self.pg.data_type(True, ''), 'varchar') # Test a float as a decimal - self.assertEqual(self.pg.data_type(5.001, ""), "decimal") + self.assertEqual(self.pg.data_type(5.001, ''), 'decimal') # Test varchar - self.assertEqual(self.pg.data_type("word", ""), "varchar") + self.assertEqual(self.pg.data_type('word', ''), 'varchar') # Test int with underscore - self.assertEqual(self.pg.data_type("1_2", ""), "varchar") + self.assertEqual(self.pg.data_type('1_2', ''), 'varchar') # Test int with leading 
zero - self.assertEqual(self.pg.data_type("01", ""), "varchar") + self.assertEqual(self.pg.data_type('01', ''), 'varchar') # Test bool self.pg.DO_PARSE_BOOLS = True - self.assertEqual(self.pg.data_type(1, ""), "bool") - self.assertEqual(self.pg.data_type(True, ""), "bool") + self.assertEqual(self.pg.data_type(1, ''), 'bool') + self.assertEqual(self.pg.data_type(True, ''), 'bool') def test_generate_data_types(self): # Test correct header labels - self.assertEqual(self.mapping["headers"], ["ID", "Name"]) + self.assertEqual(self.mapping['headers'], ['ID', 'Name']) # Test correct data types - self.assertEqual(self.mapping["type_list"], ["smallint", "varchar"]) + self.assertEqual(self.mapping['type_list'], ['smallint', 'varchar']) self.assertEqual( - self.mapping2["type_list"], - [ - "varchar", - "varchar", - "decimal", - "varchar", - "decimal", - "smallint", - "varchar", - ], - ) + self.mapping2['type_list'], + ['varchar', 'varchar', 'decimal', 'varchar', "decimal", "smallint", "varchar"]) self.assertEqual( - self.mapping3["type_list"], - ["varchar", "varchar", "decimal", "varchar", "decimal", "bool", "varchar"], - ) + self.mapping3['type_list'], + ['varchar', 'varchar', 'decimal', 'varchar', "decimal", "bool", "varchar"]) # Test correct lengths - self.assertEqual(self.mapping["longest"], [1, 5]) + self.assertEqual(self.mapping['longest'], [1, 5]) def test_vc_padding(self): # Test padding calculated correctly - self.assertEqual(self.pg.vc_padding(self.mapping, 0.2), [1, 6]) + self.assertEqual(self.pg.vc_padding(self.mapping, .2), [1, 6]) def test_vc_max(self): # Test max sets it to the max - self.assertEqual(self.pg.vc_max(self.mapping, ["Name"]), [1, 65535]) + self.assertEqual(self.pg.vc_max(self.mapping, ['Name']), [1, 65535]) # Test raises when can't find column # To Do @@ -123,60 +114,49 @@ def test_vc_max(self): def test_vc_validate(self): # Test that a column with a width of 0 is set to 1 - self.mapping["longest"][0] = 0 + self.mapping['longest'][0] = 0 self.mapping = self.pg.vc_validate(self.mapping) self.assertEqual(self.mapping, [1, 5]) def test_create_sql(self): # Test the the statement is expected - sql = self.pg.create_sql("tmc.test", self.mapping, distkey="ID") + sql = self.pg.create_sql('tmc.test', self.mapping, distkey='ID') exp_sql = "create table tmc.test (\n id smallint,\n name varchar(5)) \ndistkey(ID) ;" self.assertEqual(sql, exp_sql) def test_column_validate(self): - bad_cols = [ - "a", - "a", - "", - "SELECT", - "asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaksdfjklasjdfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjklasdfjklasjkldfakljsdfjalsdkfjklasjdfklasjdfklasdkljf", # noqa: E501 - ] - fixed_cols = [ - "a", - "a_1", - "col_2", - "col_3", - "asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaksdfjklasjdfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjkl", # noqa: E501 - ] + bad_cols = ['a', 'a', '', 'SELECT', 'asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaksdfjklasjdfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjklasdfjklasjkldfakljsdfjalsdkfjklasjdfklasjdfklasdkljf'] # noqa: E501 + fixed_cols = ['a', 'a_1', 'col_2', 'col_3', 'asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaksdfjklasjdfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjkl'] # noqa: E501 self.assertEqual(self.pg.column_name_validate(bad_cols), fixed_cols) def test_create_statement(self): # Assert that copy statement is expected - sql = self.pg.create_statement(self.tbl, "tmc.test", distkey="ID") + sql = self.pg.create_statement(self.tbl, 'tmc.test', 
distkey='ID') exp_sql = """create table tmc.test (\n "id" smallint,\n "name" varchar(5)) \ndistkey(ID) ;""" # noqa: E501 self.assertEqual(sql, exp_sql) # Assert that an error is raised by an empty table - empty_table = Table([["Col_1", "Col_2"]]) - self.assertRaises(ValueError, self.pg.create_statement, empty_table, "tmc.test") - + empty_table = Table([['Col_1', 'Col_2']]) + self.assertRaises(ValueError, self.pg.create_statement, empty_table, 'tmc.test') # These tests interact directly with the Postgres database -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestPostgresDB(unittest.TestCase): + def setUp(self): self.temp_schema = TEMP_SCHEMA self.pg = Postgres() - self.tbl = Table([["ID", "Name"], [1, "Jim"], [2, "John"], [3, "Sarah"]]) + self.tbl = Table([['ID', 'Name'], + [1, 'Jim'], + [2, 'John'], + [3, 'Sarah']]) # Create a schema, create a table, create a view setup_sql = f""" @@ -204,74 +184,66 @@ def tearDown(self): def test_query(self): # Check that query sending back expected result - r = self.pg.query("select 1") - self.assertEqual(r[0]["?column?"], 1) + r = self.pg.query('select 1') + self.assertEqual(r[0]['?column?'], 1) def test_query_with_parameters(self): table_name = f"{self.temp_schema}.test" - self.pg.copy(self.tbl, f"{self.temp_schema}.test", if_exists="append") + self.pg.copy(self.tbl, f"{self.temp_schema}.test", if_exists='append') sql = f"select * from {table_name} where name = %s" - name = "Sarah" + name = 'Sarah' r = self.pg.query(sql, parameters=[name]) - self.assertEqual(r[0]["name"], name) + self.assertEqual(r[0]['name'], name) sql = f"select * from {table_name} where name in (%s, %s)" - names = ["Sarah", "John"] + names = ['Sarah', 'John'] r = self.pg.query(sql, parameters=names) self.assertEqual(r.num_rows, 2) def test_copy(self): # Copy a table and ensure table exists - self.pg.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop") - r = self.pg.query( - f"select * from {self.temp_schema}.test_copy where name='Jim'" - ) - self.assertEqual(r[0]["id"], 1) + self.pg.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop') + r = self.pg.query(f"select * from {self.temp_schema}.test_copy where name='Jim'") + self.assertEqual(r[0]['id'], 1) # Copy table and ensure truncate works. - self.pg.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="truncate") + self.pg.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='truncate') tbl = self.pg.query(f"select count(*) from {self.temp_schema}.test_copy") self.assertEqual(tbl.first, 3) # Copy table and ensure that drop works. - self.pg.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop") + self.pg.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop') tbl = self.pg.query(f"select count(*) from {self.temp_schema}.test_copy") self.assertEqual(tbl.first, 3) # Copy table and ensure that append works. - self.pg.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="append") + self.pg.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='append') tbl = self.pg.query(f"select count(*) from {self.temp_schema}.test_copy") self.assertEqual(tbl.first, 6) # Try to copy the table and ensure that default fail works. 
- self.assertRaises( - ValueError, self.pg.copy, self.tbl, f"{self.temp_schema}.test_copy" - ) + self.assertRaises(ValueError, self.pg.copy, self.tbl, f'{self.temp_schema}.test_copy') # Try to copy the table and ensure that explicit fail works. self.assertRaises( - ValueError, - self.pg.copy, - self.tbl, - f"{self.temp_schema}.test_copy", - if_exists="fail", - ) + ValueError, self.pg.copy, self.tbl, f'{self.temp_schema}.test_copy', if_exists='fail') def test_to_postgres(self): - self.tbl.to_postgres(f"{self.temp_schema}.test_copy") - r = self.pg.query( - f"select * from {self.temp_schema}.test_copy where name='Jim'" - ) - self.assertEqual(r[0]["id"], 1) + self.tbl.to_postgres(f'{self.temp_schema}.test_copy') + r = self.pg.query(f"select * from {self.temp_schema}.test_copy where name='Jim'") + self.assertEqual(r[0]['id'], 1) def test_from_postgres(self): - tbl = Table([["id", "name"], [1, "Jim"], [2, "John"], [3, "Sarah"]]) + tbl = Table([['id', 'name'], + [1, 'Jim'], + [2, 'John'], + [3, 'Sarah']]) - self.pg.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop") + self.pg.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop') out_tbl = self.tbl.from_postgres(f"SELECT * FROM {self.temp_schema}.test_copy") assert_matching_tables(out_tbl, tbl) diff --git a/test/test_donorbox/donorbox_test_data.py b/test/test_donorbox/donorbox_test_data.py deleted file mode 100644 index ec69dddc1b..0000000000 --- a/test/test_donorbox/donorbox_test_data.py +++ /dev/null @@ -1,367 +0,0 @@ -campaign_1_json = { - "created_at": "2022-09-20T18:47:05.381Z", - "currency": "usd", - "donations_count": 0, - "formatted_goal_amount": "$1,000", - "formatted_total_raised": "$0", - "goal_amt": "1000.0", - "id": 366172, - "name": "Test Campaign", - "slug": "test-campaign-623", - "total_raised": "0.0", - "updated_at": "2022-09-21T05:38:28.915Z", -} - -campaign_2_json = { - "id": 366590, - "name": "Membership Campaign", - "slug": "membership-campaign-10", - "currency": "usd", - "created_at": "2022-09-21T21:05:04.909Z", - "updated_at": "2022-10-19T05:39:20.993Z", - "goal_amt": "0.0", - "formatted_goal_amount": "$0", - "total_raised": "0.0", - "formatted_total_raised": "$0", - "donations_count": 0, -} - -get_campaigns_response_json = [campaign_1_json] - -get_campaigns_filtered_response_json = [campaign_2_json] - -get_campaigns_desc_order = [campaign_2_json, campaign_1_json] - -get_campaigns_asc_order = [campaign_1_json, campaign_2_json] - -donation_1_json = { - "campaign": { - "id": 366172, - "name": "Test Campaign", - "started_at": "2022-09-20T18:47:05.381Z", - }, - "donor": { - "id": 7508840, - "name": "Megan Rapinoe", - "first_name": "Megan", - "last_name": "Rapinoe", - "email": "fakeemail@email.com", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - }, - "amount": "3.0", - "formatted_amount": "$3", - "converted_amount": "3.0", - "formatted_converted_amount": "$3", - "converted_net_amount": "2.57", - "formatted_converted_net_amount": "$2.57", - "recurring": True, - "first_recurring_donation": True, - "amount_refunded": "0.0", - "formatted_amount_refunded": "", - "stripe_charge_id": "ABCDEFG123132", - "id": 25497167, - "status": "paid", - "donation_type": "stripe", - "donation_date": "2022-10-19T17:32:52.613Z", - "anonymous_donation": False, - "gift_aid": False, - "designation": None, - "join_mailing_list": False, - "comment": "testing testing", - "donating_company": None, - "currency": "USD", - 
"converted_currency": "USD", - "utm_campaign": None, - "utm_source": None, - "utm_medium": None, - "utm_term": None, - "utm_content": None, - "processing_fee": 0.39, - "formatted_processing_fee": "$0.39", - "fee_covered": False, - "questions": [], - "plan_id": 1173773, - "interval": "1 M", -} - -donation_2_json = { - "campaign": { - "id": 366172, - "name": "Test Campaign", - "started_at": "2022-09-20T18:47:05.381Z", - }, - "donor": { - "id": 7509137, - "name": "Crystal Dunn", - "first_name": "Crystal", - "last_name": "Dunn", - "email": "fake2mail2@gmail.com", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - }, - "amount": "4.0", - "formatted_amount": "$4", - "converted_amount": "4.0", - "formatted_converted_amount": "$4", - "converted_net_amount": "3.52", - "formatted_converted_net_amount": "$3.52", - "recurring": True, - "first_recurring_donation": True, - "amount_refunded": "0.0", - "formatted_amount_refunded": "", - "stripe_charge_id": "31231213123ASAD", - "id": 25497700, - "status": "paid", - "donation_type": "stripe", - "donation_date": "2022-10-19T18:19:06.044Z", - "anonymous_donation": False, - "gift_aid": False, - "designation": None, - "join_mailing_list": False, - "comment": "bleep bloop", - "donating_company": None, - "currency": "USD", - "converted_currency": "USD", - "utm_campaign": None, - "utm_source": None, - "utm_medium": None, - "utm_term": None, - "utm_content": None, - "processing_fee": 0.42, - "formatted_processing_fee": "$0.42", - "fee_covered": False, - "questions": [], - "plan_id": 1173856, - "interval": "1 M", -} - -donation_3_json = { - "campaign": { - "id": 366172, - "name": "Test Campaign", - "started_at": "2022-09-20T18:47:05.381Z", - }, - "donor": { - "id": 7508840, - "name": "Rose Lavelle", - "first_name": "Rose", - "last_name": "Lavelle", - "email": "fake@fakeemail.com", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - }, - "amount": "3.0", - "formatted_amount": "$3", - "converted_amount": "3.0", - "formatted_converted_amount": "$3", - "converted_net_amount": "2.57", - "formatted_converted_net_amount": "$2.57", - "recurring": True, - "first_recurring_donation": True, - "amount_refunded": "0.0", - "formatted_amount_refunded": "", - "stripe_charge_id": "123ABC123ABC", - "id": 25525370, - "status": "paid", - "donation_type": "stripe", - "donation_date": "2022-10-20T19:33:31.744Z", - "anonymous_donation": False, - "gift_aid": False, - "designation": None, - "join_mailing_list": False, - "comment": None, - "donating_company": None, - "currency": "USD", - "converted_currency": "USD", - "utm_campaign": None, - "utm_source": None, - "utm_medium": None, - "utm_term": None, - "utm_content": None, - "processing_fee": 0.39, - "formatted_processing_fee": "$0.39", - "fee_covered": False, - "questions": [], - "plan_id": 1175651, - "interval": "1 M", -} - - -get_donations_response_json = [donation_3_json, donation_1_json, donation_2_json] - -get_donations_amount_min_3 = [donation_3_json, donation_1_json, donation_2_json] -get_donations_amount_min_4 = [donation_2_json] -get_donations_amount_min_5 = [] - -get_donations_amount_max_3 = [donation_3_json, donation_1_json] -get_donations_amount_max_4 = [donation_3_json, donation_1_json, donation_2_json] -get_donations_amount_max_2 = [] - -get_donations_date_from_valid = [] -get_donations_date_from_invalid = [] - -donor_1_json = 
{ - "id": 7508840, - "created_at": "2022-10-19T17:33:18.935Z", - "updated_at": "2022-10-20T19:34:07.127Z", - "first_name": "Elizabeth", - "last_name": "Warren", - "email": "elizabethwarren@senate.gov", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - "comment": None, - "donations_count": 2, - "last_donation_at": "2022-10-20T19:33:31.744Z", - "total": [{"currency": "USD", "value": 6.0}], -} - -donor_2_json = { - "id": 7509137, - "created_at": "2022-10-19T18:19:41.794Z", - "updated_at": "2022-10-19T18:19:41.859Z", - "first_name": "Paul", - "last_name": "Wellstone", - "email": "paulwellstone@senate.gov", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - "comment": None, - "donations_count": 1, - "last_donation_at": "2022-10-19T18:19:06.044Z", - "total": [{"currency": "USD", "value": 4.0}], -} - -get_donors_response_json = [donor_1_json, donor_2_json] - -get_donors_response_json_first_name_filter = [donor_1_json] -get_donors_response_json_last_name_filter = [donor_1_json] -get_donors_response_json_donor_name_filter = [donor_2_json] -get_donors_response_json_email_filter = [donor_2_json] - -plan_1_json = { - "id": 1175651, - "campaign": { - "id": 366172, - "name": "Test Campaign", - "started_at": "2022-09-20T18:47:05.381Z", - }, - "donor": { - "id": 7508840, - "name": "Jane Doe", - "first_name": "Doe", - "last_name": "Doe", - "email": "example@example.com", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - }, - "type": "monthly", - "amount": "3.0", - "formatted_amount": "$3", - "payment_method": "Stripe", - "started_at": "2022-10-20", - "last_donation_date": "2022-10-20T19:33:31.744Z", - "next_donation_date": "2022-11-20", - "status": "active", -} - -plan_2_json = { - "id": 1173856, - "campaign": { - "id": 366172, - "name": "Test Campaign", - "started_at": "2022-09-20T18:47:05.381Z", - }, - "donor": { - "id": 7509137, - "name": "Jane Doe", - "first_name": "Doe", - "last_name": "Jane Doe", - "email": "example@gmail.com", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - }, - "type": "monthly", - "amount": "4.0", - "formatted_amount": "$4", - "payment_method": "Stripe", - "started_at": "2022-10-19", - "last_donation_date": "2022-10-19T18:19:06.044Z", - "next_donation_date": "2022-11-19", - "status": "active", -} - -plan_3_json = { - "id": 1173773, - "campaign": { - "id": 366172, - "name": "Test Campaign", - "started_at": "2022-09-20T18:47:05.381Z", - }, - "donor": { - "id": 7508840, - "name": "Jane Doe", - "first_name": "Jane", - "last_name": "Doe", - "email": "example2@gmail.com", - "phone": None, - "address": None, - "city": None, - "state": None, - "zip_code": None, - "country": None, - "employer": None, - "occupation": None, - }, - "type": "monthly", - "amount": "3.0", - "formatted_amount": "$3", - "payment_method": "Stripe", - "started_at": "2022-10-19", - "last_donation_date": "2022-10-19T17:32:52.613Z", - "next_donation_date": "2022-11-19", - "status": "active", -} - -get_plans_response_json = [plan_1_json, plan_2_json, plan_3_json] diff --git a/test/test_donorbox/test_donorbox.py b/test/test_donorbox/test_donorbox.py deleted file mode 100644 index a8fe8c529a..0000000000 --- 
a/test/test_donorbox/test_donorbox.py +++ /dev/null @@ -1,504 +0,0 @@ -import unittest -import requests_mock - -from parsons import Table, Donorbox -from parsons.donorbox.donorbox import URI -from test.utils import mark_live_test -from test.test_donorbox import donorbox_test_data - - -# NOTE: Donorbox does not provide free sandbox accounts to developers. To enable live tests, -# get a paid account and remove the @skip decorators on the live tests below. - - -class TestDonorbox(unittest.TestCase): - def setUp(self): - self.base_uri = URI - self.donorbox = Donorbox(email="testemail@examp.org", api_key="faketestapikey") - - @requests_mock.Mocker() - def test_get_campaigns(self, m): - - m.get( - self.base_uri + "/campaigns", - json=donorbox_test_data.get_campaigns_response_json, - ) - result = self.donorbox.get_campaigns() - - # Assert the method returns expected dict response - self.assertDictEqual( - result.to_dicts()[0], donorbox_test_data.get_campaigns_response_json[0] - ) - columns = [ - "id", - "name", - "slug", - "currency", - "created_at", - "updated_at", - "goal_amt", - "formatted_goal_amount", - "total_raised", - "formatted_total_raised", - "donations_count", - ] - self.assertCountEqual(result.columns, columns) - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_campaigns_live_test(self): - result = self.donorbox.get_campaigns() - self.assertIsInstance(result, Table) - columns = [ - "id", - "name", - "slug", - "currency", - "created_at", - "updated_at", - "goal_amt", - "formatted_goal_amount", - "total_raised", - "formatted_total_raised", - "donations_count", - ] - self.assertEqual(result.columns, columns) - - @requests_mock.Mocker() - def test_get_campaigns_with_id_filter(self, m): - m.get( - self.base_uri + "/campaigns", - json=donorbox_test_data.get_campaigns_filtered_response_json, - ) - result = self.donorbox.get_campaigns(id=366590) - self.assertIsInstance(result, Table) - self.assertEqual(result.num_rows, 1) - self.assertEqual(result.to_dicts()[0]["id"], 366590) - self.assertEqual(result.to_dicts()[0]["name"], "Membership Campaign") - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_campaigns_with_id_filter_live_test(self): - result = self.donorbox.get_campaigns(id=366590) - self.assertIsInstance(result, Table) - self.assertEqual(result.num_rows, 1) - self.assertEqual(result.to_dicts()[0]["id"], 366590) - self.assertEqual(result.to_dicts()[0]["name"], "Membership Campaign") - - @requests_mock.Mocker() - def test_get_campaigns_with_name_filter(self, m): - m.get( - self.base_uri + "/campaigns", - json=donorbox_test_data.get_campaigns_filtered_response_json, - ) - result = self.donorbox.get_campaigns(name="Membership Campaign") - self.assertIsInstance(result, Table) - self.assertEqual(result.num_rows, 1) - self.assertEqual(result.to_dicts()[0]["id"], 366590) - self.assertEqual(result.to_dicts()[0]["name"], "Membership Campaign") - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_campaigns_with_name_filter_live_test(self): - result = self.donorbox.get_campaigns(name="Membership Campaign") - self.assertIsInstance(result, Table) - self.assertEqual(result.num_rows, 1) - self.assertEqual(result.to_dicts()[0]["id"], 366590) - self.assertEqual(result.to_dicts()[0]["name"], "Membership Campaign") - - @requests_mock.Mocker() - def test_get_campaigns_with_order_filter(self, m): - - m.get( - self.base_uri + "/campaigns", - json=donorbox_test_data.get_campaigns_desc_order, - ) - result = 
self.donorbox.get_campaigns(order="desc") - self.assertEqual(result["id"], [366590, 366172]) - - m.get( - self.base_uri + "/campaigns", - json=donorbox_test_data.get_campaigns_asc_order, - ) - result = self.donorbox.get_campaigns(order="asc") - self.assertEqual(result["id"], [366172, 366590]) - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_campaigns_with_order_filter_live_test(self): - # check order of the ids without looking at IDs. or maybe look at updated/created date - result = self.donorbox.get_campaigns() - self.assertEqual(result["id"], [366590, 366172]) - result = self.donorbox.get_campaigns(order="desc") - self.assertEqual(result["id"], [366590, 366172]) - result = self.donorbox.get_campaigns(order="asc") - self.assertEqual(result["id"], [366172, 366590]) - - @requests_mock.Mocker() - def test_get_donations(self, m): - - m.get( - self.base_uri + "/donations", - json=donorbox_test_data.get_donations_response_json, - ) - result = self.donorbox.get_donations() - - # Assert the method returns expected dict response - self.assertDictEqual( - result.to_dicts()[0], donorbox_test_data.get_donations_response_json[0] - ) - columns = [ - "campaign", - "donor", - "amount", - "formatted_amount", - "converted_amount", - "formatted_converted_amount", - "converted_net_amount", - "formatted_converted_net_amount", - "recurring", - "first_recurring_donation", - "amount_refunded", - "formatted_amount_refunded", - "stripe_charge_id", - "id", - "status", - "donation_type", - "donation_date", - "anonymous_donation", - "gift_aid", - "designation", - "join_mailing_list", - "comment", - "donating_company", - "currency", - "converted_currency", - "utm_campaign", - "utm_source", - "utm_medium", - "utm_term", - "utm_content", - "processing_fee", - "formatted_processing_fee", - "fee_covered", - "questions", - "plan_id", - "interval", - ] - self.assertCountEqual(result.columns, columns) - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_donations_live_test(self): - result = self.donorbox.get_donations() - self.assertIsInstance(result, Table) - columns = [ - "campaign", - "donor", - "amount", - "formatted_amount", - "converted_amount", - "formatted_converted_amount", - "converted_net_amount", - "formatted_converted_net_amount", - "recurring", - "first_recurring_donation", - "amount_refunded", - "formatted_amount_refunded", - "stripe_charge_id", - "id", - "status", - "donation_type", - "donation_date", - "anonymous_donation", - "gift_aid", - "designation", - "join_mailing_list", - "comment", - "donating_company", - "currency", - "converted_currency", - "utm_campaign", - "utm_source", - "utm_medium", - "utm_term", - "utm_content", - "processing_fee", - "formatted_processing_fee", - "fee_covered", - "questions", - "plan_id", - "interval", - ] - self.assertEqual(result.columns, columns) - self.assertEqual(result.num_rows, 3) - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_donations_with_date_from_filter_live_test(self): - # Correct formats (YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY) successfully filter - result = self.donorbox.get_donations(date_from="2022-10-20") - self.assertIsInstance(result, Table) - self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["donation_date"], "2022-10-20T19:33:31.744Z") - # Try the other three formats quickly - for date_string in ["2022/10/20", "20221020", "20-10-2022"]: - self.assertEqual( - self.donorbox.get_donations(date_from=date_string).num_rows, 1 - ) - # Incorrect 
formats raise error - with self.assertRaises(ValueError): - result = self.donorbox.get_donations(date_from="10 20 2022") - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_donations_with_date_to_filter_live_test(self): - # Correct formats (YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY) successfully filter - result = self.donorbox.get_donations(date_to="2022-10-20") - self.assertIsInstance(result, Table) - self.assertEqual(result.num_rows, 2) - self.assertEqual(result[0]["donation_date"], "2022-10-19T18:19:06.044Z") - # Try the other three formats quickly - for date_string in ["2022/10/20", "20221020", "20-10-2022"]: - self.assertEqual( - self.donorbox.get_donations(date_to=date_string).num_rows, 2 - ) - # Incorrect formats raise error - with self.assertRaises(ValueError): - result = self.donorbox.get_donations(date_to="10 20 2022") - - @requests_mock.Mocker() - def test_get_donations_with_amount_min_filter(self, m): - m.get( - self.base_uri + "/donations", - json=donorbox_test_data.get_donations_amount_min_3, - ) - result = self.donorbox.get_donations(amount_min="3") - self.assertEqual(result.num_rows, 3) - m.get( - self.base_uri + "/donations", - json=donorbox_test_data.get_donations_amount_min_4, - ) - result = self.donorbox.get_donations(amount_min="4") - self.assertEqual(result.num_rows, 1) - m.get( - self.base_uri + "/donations", - json=donorbox_test_data.get_donations_amount_min_5, - ) - result = self.donorbox.get_donations(amount_min="5") - self.assertEqual(result.num_rows, 0) - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_donations_with_amount_min_filter_live_test(self): - result = self.donorbox.get_donations(amount_min="3") - self.assertEqual(result.num_rows, 3) - result = self.donorbox.get_donations(amount_min="4") - self.assertEqual(result.num_rows, 1) - result = self.donorbox.get_donations(amount_min="5") - self.assertEqual(result.num_rows, 0) - - @requests_mock.Mocker() - def test_get_donations_with_amount_max_filter(self, m): - m.get( - self.base_uri + "/donations", - json=donorbox_test_data.get_donations_amount_max_3, - ) - result = self.donorbox.get_donations(amount_max="3") - self.assertEqual(result.num_rows, 2) - m.get( - self.base_uri + "/donations", - json=donorbox_test_data.get_donations_amount_max_4, - ) - result = self.donorbox.get_donations(amount_max="4") - self.assertEqual(result.num_rows, 3) - m.get( - self.base_uri + "/donations", - json=donorbox_test_data.get_donations_amount_max_2, - ) - result = self.donorbox.get_donations(amount_max="2") - self.assertEqual(result.num_rows, 0) - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_donations_with_amount_max_filter_live_test(self): - result = self.donorbox.get_donations(amount_max="3") - self.assertEqual(result.num_rows, 2) - result = self.donorbox.get_donations(amount_max="4") - self.assertEqual(result.num_rows, 3) - result = self.donorbox.get_donations(amount_max="2") - self.assertEqual(result.num_rows, 0) - - @requests_mock.Mocker() - def test_get_donors(self, m): - - m.get( - self.base_uri + "/donors", json=donorbox_test_data.get_donors_response_json - ) - result = self.donorbox.get_donors() - - # Assert the method returns expected dict response - self.assertDictEqual( - result.to_dicts()[0], donorbox_test_data.get_donors_response_json[0] - ) - columns = [ - "id", - "created_at", - "updated_at", - "first_name", - "last_name", - "email", - "phone", - "address", - "city", - "state", - "zip_code", - "country", - 
"employer", - "occupation", - "comment", - "donations_count", - "last_donation_at", - "total", - ] - self.assertCountEqual(result.columns, columns) - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_donors_live_test(self): - result = self.donorbox.get_donors() - self.assertIsInstance(result, Table) - columns = [ - "id", - "created_at", - "updated_at", - "first_name", - "last_name", - "email", - "phone", - "address", - "city", - "state", - "zip_code", - "country", - "employer", - "occupation", - "comment", - "donations_count", - "last_donation_at", - "total", - ] - self.assertEqual(result.columns, columns) - self.assertEqual(result.num_rows, 2) - - @requests_mock.Mocker() - def test_get_donors_with_name_and_email_filters(self, m): - m.get( - self.base_uri + "/donors", - json=donorbox_test_data.get_donors_response_json_first_name_filter, - ) - result = self.donorbox.get_donors(first_name="Elizabeth") - self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["last_name"], "Warren") - m.get( - self.base_uri + "/donors", - json=donorbox_test_data.get_donors_response_json_last_name_filter, - ) - result = self.donorbox.get_donors(last_name="Warren") - self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["first_name"], "Elizabeth") - m.get( - self.base_uri + "/donors", - json=donorbox_test_data.get_donors_response_json_donor_name_filter, - ) - result = self.donorbox.get_donors(donor_name="Paul Wellstone") - self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["email"], "paulwellstone@senate.gov") - m.get( - self.base_uri + "/donors", - json=donorbox_test_data.get_donors_response_json_email_filter, - ) - result = self.donorbox.get_donors(email="paulwellstone@senate.gov") - self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["first_name"], "Paul") - - @requests_mock.Mocker() - def test_get_plans(self, m): - m.get(self.base_uri + "/plans", json=donorbox_test_data.get_plans_response_json) - result = self.donorbox.get_plans() - assert isinstance(result, Table) - columns = [ - "id", - "campaign", - "donor", - "type", - "amount", - "formatted_amount", - "payment_method", - "started_at", - "last_donation_date", - "next_donation_date", - "status", - ] - assert result.columns == columns - assert result.num_rows == 3 - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_plans_live_test(self): - result = self.donorbox.get_plans() - assert isinstance(result, Table) - columns = [ - "id", - "campaign", - "donor", - "type", - "amount", - "formatted_amount", - "payment_method", - "started_at", - "last_donation_date", - "next_donation_date", - "status", - ] - assert result.columns == columns - assert result.num_rows == 3 - - @unittest.skip("requires live account setup") - @mark_live_test - def test_get_plans_with_date_from_filter_live_test(self): - # Correct formats (YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY) successfully filter - result = self.donorbox.get_plans(date_from="2022-10-20") - assert isinstance(result, Table) - assert result.num_rows == 1 - assert result[0]["started_at"] == "2022-10-20" - # Try the other three formats quickly - for date_string in ["2022/10/20", "20221020", "20-10-2022"]: - assert self.donorbox.get_plans(date_from=date_string).num_rows == 1 - # Incorrect formats raise error - with self.assertRaises(ValueError): - result = self.donorbox.get_plans(date_from="10 20 2022") - - @unittest.skip("requires live account setup") - @mark_live_test - def 
test_get_plans_with_date_to_filter_live_test(self): - # Correct formats (YYYY-mm-dd YYYY/mm/dd YYYYmmdd dd-mm-YYYY) successfully filter - result = self.donorbox.get_plans(date_to="2022-10-20") - assert isinstance(result, Table) - assert result.num_rows == 2 - assert result[0]["started_at"] == "2022-10-19" - # Try the other three formats quickly - for date_string in ["2022/10/20", "20221020", "20-10-2022"]: - assert self.donorbox.get_plans(date_to=date_string).num_rows == 2 - # Incorrect formats raise error - with self.assertRaises(ValueError): - result = self.donorbox.get_plans(date_to="10 20 2022") - - def test_date_format_helper(self): - # valid formats work (should just run without error) - for good_format in ["2022-10-20", "2022/10/20", "20221020", "20-10-2022"]: - self.donorbox._date_format_helper(good_format) - # invalid formats raise errors - for bad_format in ["10 20 2022", "October 20th, 2022", "22-10-20"]: - with self.assertRaises(ValueError): - self.donorbox._date_format_helper(bad_format) diff --git a/test/test_etl.py b/test/test_etl.py index 17be8ad1ef..78202169a3 100644 --- a/test/test_etl.py +++ b/test/test_etl.py @@ -1,10 +1,9 @@ +import unittest +import petl +from parsons.etl.table import Table import os import shutil -import unittest from test.utils import assert_matching_tables - -import petl -from parsons import Table from parsons.utilities import zip_archive # Notes : @@ -13,44 +12,43 @@ class TestParsonsTable(unittest.TestCase): + def setUp(self): # Create Table object - self.lst = [ - {"a": 1, "b": 2, "c": 3}, - {"a": 4, "b": 5, "c": 6}, - {"a": 7, "b": 8, "c": 9}, - {"a": 10, "b": 11, "c": 12}, - {"a": 13, "b": 14, "c": 15}, - ] - self.lst_dicts = [{"first": "Bob", "last": "Smith"}] + self.lst = [{'a': 1, 'b': 2, 'c': 3}, + {'a': 4, 'b': 5, 'c': 6}, + {'a': 7, 'b': 8, 'c': 9}, + {'a': 10, 'b': 11, 'c': 12}, + {'a': 13, 'b': 14, 'c': 15}] + self.lst_dicts = [{'first': 'Bob', 'last': 'Smith'}] self.tbl = Table(self.lst_dicts) # Create a tmp dir - os.mkdir("tmp") + os.mkdir('tmp') def tearDown(self): # Delete tmp folder and files - shutil.rmtree("tmp") + shutil.rmtree('tmp') def test_from_list_of_dicts(self): tbl = Table(self.lst) # Test Iterate and is list like - self.assertEqual(tbl[0], {"a": 1, "b": 2, "c": 3}) + self.assertEqual(tbl[0], {'a': 1, 'b': 2, 'c': 3}) def test_from_list_of_lists(self): list_of_lists = [ - ["a", "b", "c"], + ['a', 'b', 'c'], [1, 2, 3], [4, 5, 6], ] tbl = Table(list_of_lists) - self.assertEqual(tbl[0], {"a": 1, "b": 2, "c": 3}) + self.assertEqual(tbl[0], {'a': 1, 'b': 2, 'c': 3}) def test_from_petl(self): @@ -91,19 +89,19 @@ def test_materialize_to_file(self): def test_empty_column(self): # Test that returns True on an empty column and False on a populated one. - tbl = Table([["a", "b"], ["1", None], ["2", None]]) + tbl = Table([['a', 'b'], ['1', None], ['2', None]]) - self.assertTrue(tbl.empty_column("b")) - self.assertFalse(tbl.empty_column("a")) + self.assertTrue(tbl.empty_column('b')) + self.assertFalse(tbl.empty_column('a')) def test_from_columns(self): - header = ["col1", "col2"] + header = ['col1', 'col2'] col1 = [1, 2, 3] - col2 = ["a", "b", "c"] + col2 = ['a', 'b', 'c'] tbl = Table.from_columns([col1, col2], header=header) - self.assertEqual(tbl[0], {"col1": 1, "col2": "a"}) + self.assertEqual(tbl[0], {'col1': 1, 'col2': 'a'}) # Removing this test since it is an optional dependency. 
""" @@ -130,29 +128,27 @@ def test_to_petl(self): def test_to_html(self): - html_file = "tmp/test.html" + html_file = 'tmp/test.html' # Test writing file self.tbl.to_html(html_file) # Test written correctly - html = ( - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "
firstlast
BobSmith
\n" - ) - with open(html_file, "r") as f: + html = ("\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "
firstlast
BobSmith
\n") + with open(html_file, 'r') as f: self.assertEqual(f.read(), html) def test_to_temp_html(self): @@ -161,23 +157,21 @@ def test_to_temp_html(self): path = self.tbl.to_html() # Written correctly - html = ( - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "\n" - "
firstlast
BobSmith
\n" - ) - with open(path, "r") as f: + html = ("\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "\n" + "
firstlast
BobSmith
\n") + with open(path, 'r') as f: self.assertEqual(f.read(), html) def _assert_expected_csv(self, path, orig_tbl): @@ -185,13 +179,13 @@ def _assert_expected_csv(self, path, orig_tbl): assert_matching_tables(orig_tbl, result_tbl) def test_to_from_csv(self): - path = "tmp/test.csv" + path = 'tmp/test.csv' self.tbl.to_csv(path) self._assert_expected_csv(path, self.tbl) os.remove(path) def test_to_from_csv_compressed(self): - path = "tmp/test.csv.gz" + path = 'tmp/test.csv.gz' self.tbl.to_csv(path) self._assert_expected_csv(path, self.tbl) os.remove(path) @@ -201,21 +195,21 @@ def test_to_from_temp_csv(self): self._assert_expected_csv(path, self.tbl) def test_to_from_temp_csv_compressed(self): - path = self.tbl.to_csv(temp_file_compression="gzip") + path = self.tbl.to_csv(temp_file_compression='gzip') self._assert_expected_csv(path, self.tbl) def test_from_csv_string(self): path = self.tbl.to_csv() # Pull the file into a string - with open(path, "r") as f: + with open(path, 'r') as f: str = f.read() result_tbl = Table.from_csv_string(str) assert_matching_tables(self.tbl, result_tbl) def test_append_csv_compressed(self): - path = self.tbl.to_csv(temp_file_compression="gzip") - append_tbl = Table([{"first": "Mary", "last": "Nichols"}]) + path = self.tbl.to_csv(temp_file_compression='gzip') + append_tbl = Table([{'first': 'Mary', 'last': 'Nichols'}]) append_tbl.append_csv(path) result_tbl = Table.from_csv(path) @@ -225,8 +219,8 @@ def test_append_csv_compressed(self): def test_from_csv_raises_on_empty_file(self): # Create empty file - path = "tmp/empty.csv" - open(path, "a").close() + path = 'tmp/empty.csv' + open(path, 'a').close() self.assertRaises(ValueError, Table.from_csv, path) @@ -234,16 +228,16 @@ def test_to_csv_zip(self): try: # Test using the to_csv() method - self.tbl.to_csv("myzip.zip") - tmp = zip_archive.unzip_archive("myzip.zip") + self.tbl.to_csv('myzip.zip') + tmp = zip_archive.unzip_archive('myzip.zip') assert_matching_tables(self.tbl, Table.from_csv(tmp)) # Test using the to_csv_zip() method - self.tbl.to_zip_csv("myzip.zip") - tmp = zip_archive.unzip_archive("myzip.zip") + self.tbl.to_zip_csv('myzip.zip') + tmp = zip_archive.unzip_archive('myzip.zip') assert_matching_tables(self.tbl, Table.from_csv(tmp)) finally: - os.unlink("myzip.zip") + os.unlink('myzip.zip') def test_to_civis(self): @@ -251,7 +245,7 @@ def test_to_civis(self): pass def test_to_from_json(self): - path = "tmp/test.json" + path = 'tmp/test.json' self.tbl.to_json(path) result_tbl = Table.from_json(path) @@ -259,7 +253,7 @@ def test_to_from_json(self): os.remove(path) def test_to_from_json_compressed(self): - path = "tmp/test.json.gz" + path = 'tmp/test.json.gz' self.tbl.to_json(path) result_tbl = Table.from_json(path) @@ -272,12 +266,12 @@ def test_to_from_temp_json(self): assert_matching_tables(self.tbl, result_tbl) def test_to_from_temp_json_compressed(self): - path = self.tbl.to_json(temp_file_compression="gzip") + path = self.tbl.to_json(temp_file_compression='gzip') result_tbl = Table.from_json(path) assert_matching_tables(self.tbl, result_tbl) def test_to_from_json_line_delimited(self): - path = "tmp/test.json" + path = 'tmp/test.json' self.tbl.to_json(path, line_delimited=True) result_tbl = Table.from_json(path, line_delimited=True) @@ -285,7 +279,7 @@ def test_to_from_json_line_delimited(self): os.remove(path) def test_to_from_json_line_delimited_compressed(self): - path = "tmp/test.json.gz" + path = 'tmp/test.json.gz' self.tbl.to_json(path, line_delimited=True) result_tbl = Table.from_json(path, 
line_delimited=True) @@ -294,271 +288,250 @@ def test_to_from_json_line_delimited_compressed(self): def test_columns(self): # Test that columns are listed correctly - self.assertEqual(self.tbl.columns, ["first", "last"]) + self.assertEqual(self.tbl.columns, ['first', 'last']) def test_add_column(self): # Test that a new column is added correctly - self.tbl.add_column("middle", index=1) - self.assertEqual(self.tbl.columns[1], "middle") + self.tbl.add_column('middle', index=1) + self.assertEqual(self.tbl.columns[1], 'middle') def test_column_add_dupe(self): # Test that we can't add an existing column name - self.assertRaises(ValueError, self.tbl.add_column, "first") - - def test_add_column_if_exists(self): - self.tbl.add_column("first", if_exists="replace") - self.assertEqual(self.tbl.columns, ["first", "last"]) + self.assertRaises(ValueError, self.tbl.add_column, 'first') def test_remove_column(self): # Test that column is removed correctly - self.tbl.remove_column("first") - self.assertNotEqual(self.tbl.data[0], "first") + self.tbl.remove_column('first') + self.assertNotEqual(self.tbl.data[0], 'first') def test_rename_column(self): # Test that you can rename a column - self.tbl.rename_column("first", "f") - self.assertEqual(self.tbl.columns[0], "f") + self.tbl.rename_column('first', 'f') + self.assertEqual(self.tbl.columns[0], 'f') def test_column_rename_dupe(self): # Test that we can't rename to a column that already exists - self.assertRaises(ValueError, self.tbl.rename_column, "last", "first") + self.assertRaises(ValueError, self.tbl.rename_column, 'last', 'first') def test_fill_column(self): # Test that the column is filled tbl = Table(self.lst) # Fixed Value - tbl.fill_column("c", 0) - self.assertEqual(list(tbl.table["c"]), [0] * tbl.num_rows) + tbl.fill_column('c', 0) + self.assertEqual(list(tbl.table['c']), [0] * tbl.num_rows) # Calculated Value - tbl.fill_column("c", lambda x: x["b"] * 2) - self.assertEqual(list(tbl.table["c"]), [x["b"] * 2 for x in self.lst]) + tbl.fill_column('c', lambda x: x['b'] * 2) + self.assertEqual(list(tbl.table['c']), [x['b'] * 2 for x in self.lst]) def test_fillna_column(self): # Test that None values in the column are filled - self.lst = [ - {"a": 1, "b": 2, "c": 3}, - {"a": 4, "b": 5, "c": None}, - {"a": 7, "b": 8, "c": 9}, - {"a": 10, "b": 11, "c": None}, - {"a": 13, "b": 14, "c": 15}, - ] + self.lst = [{'a': 1, 'b': 2, 'c': 3}, + {'a': 4, 'b': 5, 'c': None}, + {'a': 7, 'b': 8, 'c': 9}, + {'a': 10, 'b': 11, 'c': None}, + {'a': 13, 'b': 14, 'c': 15}] # Fixed Value only tbl = Table(self.lst) - tbl.fillna_column("c", 0) - self.assertEqual(list(tbl.table["c"]), [3, 0, 9, 0, 15]) + tbl.fillna_column('c', 0) + self.assertEqual(list(tbl.table['c']), [3, 0, 9, 0, 15]) def test_move_column(self): # Test moving a column from end to front - self.tbl.move_column("last", 0) - self.assertEqual(self.tbl.columns[0], "last") + self.tbl.move_column('last', 0) + self.assertEqual(self.tbl.columns[0], 'last') def test_convert_column(self): # Test that column updates - self.tbl.convert_column("first", "upper") - self.assertEqual(self.tbl[0], {"first": "BOB", "last": "Smith"}) + self.tbl.convert_column('first', 'upper') + self.assertEqual(self.tbl[0], {'first': 'BOB', 'last': 'Smith'}) def test_convert_columns_to_str(self): # Test that all columns are string mixed_raw = [ - {"col1": 1, "col2": 2, "col3": 3}, - {"col1": "one", "col2": 2, "col3": [3, "three", 3.0]}, - {"col1": {"one": 1, "two": 2.0}, "col2": None, "col3": "three"}, + {'col1': 1, 'col2': 2, 'col3': 3}, + 
{'col1': 'one', 'col2': 2, 'col3': [3, 'three', 3.0]}, + {'col1': {'one': 1, "two": 2.0}, 'col2': None, "col3": 'three'} ] tbl = Table(mixed_raw) tbl.convert_columns_to_str() cols = tbl.get_columns_type_stats() - type_set = {i for x in cols for i in x["type"]} - self.assertTrue("str" in type_set and len(type_set) == 1) + type_set = {i for x in cols for i in x['type']} + self.assertTrue('str' in type_set and len(type_set) == 1) def test_convert_table(self): # Test that the table updates - self.tbl.convert_table("upper") - self.assertEqual(self.tbl[0], {"first": "BOB", "last": "SMITH"}) + self.tbl.convert_table('upper') + self.assertEqual(self.tbl[0], {'first': 'BOB', 'last': 'SMITH'}) def test_coalesce_columns(self): # Test coalescing into an existing column test_raw = [ - {"first": "Bob", "last": "Smith", "lastname": None}, - {"first": "Jane", "last": "", "lastname": "Doe"}, - {"first": "Mary", "last": "Simpson", "lastname": "Peters"}, + {'first': 'Bob', 'last': 'Smith', 'lastname': None}, + {'first': 'Jane', 'last': '', 'lastname': 'Doe'}, + {'first': 'Mary', 'last': 'Simpson', 'lastname': 'Peters'}, ] tbl = Table(test_raw) - tbl.coalesce_columns("last", ["last", "lastname"]) - - expected = Table( - [ - {"first": "Bob", "last": "Smith"}, - {"first": "Jane", "last": "Doe"}, - {"first": "Mary", "last": "Simpson"}, - ] - ) + tbl.coalesce_columns('last', ['last', 'lastname']) + + expected = Table([ + {'first': 'Bob', 'last': 'Smith'}, + {'first': 'Jane', 'last': 'Doe'}, + {'first': 'Mary', 'last': 'Simpson'}, + ]) assert_matching_tables(tbl, expected) # Test coalescing into a new column tbl = Table(test_raw) - tbl.coalesce_columns("new_last", ["last", "lastname"]) - expected = Table( - [ - {"first": "Bob", "new_last": "Smith"}, - {"first": "Jane", "new_last": "Doe"}, - {"first": "Mary", "new_last": "Simpson"}, - ] - ) + tbl.coalesce_columns('new_last', ['last', 'lastname']) + expected = Table([ + {'first': 'Bob', 'new_last': 'Smith'}, + {'first': 'Jane', 'new_last': 'Doe'}, + {'first': 'Mary', 'new_last': 'Simpson'}, + ]) assert_matching_tables(tbl, expected) def test_unpack_dict(self): - test_dict = [{"a": 1, "b": {"nest1": 1, "nest2": 2}}] + test_dict = [{'a': 1, 'b': {'nest1': 1, 'nest2': 2}}] test_table = Table(test_dict) # Test that dict at the top level - test_table.unpack_dict("b", prepend=False) - self.assertEqual(test_table.columns, ["a", "nest1", "nest2"]) + test_table.unpack_dict('b', prepend=False) + self.assertEqual(test_table.columns, ['a', 'nest1', 'nest2']) def test_unpack_list(self): - test_table = Table([{"a": 1, "b": [1, 2, 3]}]) + test_table = Table([{'a': 1, 'b': [1, 2, 3]}]) # Test that list at the top level - test_table.unpack_list("b", replace=True) - self.assertEqual(["a", "b_0", "b_1", "b_2"], test_table.columns) + test_table.unpack_list('b', replace=True) + self.assertEqual(['a', 'b_0', 'b_1', 'b_2'], test_table.columns) def test_unpack_list_with_mixed_col(self): # Test unpacking column with non-list items - mixed_tbl = Table([{"id": 1, "tag": [1, 2, None, 4]}, {"id": 2, "tag": None}]) - tbl_unpacked = Table(mixed_tbl.unpack_list("tag")) + mixed_tbl = Table([{'id': 1, 'tag': [1, 2, None, 4]}, {'id': 2, 'tag': None}]) + tbl_unpacked = Table(mixed_tbl.unpack_list('tag')) # Make sure result has the right number of columns self.assertEqual(len(tbl_unpacked.columns), 5) - result_table = Table( - [ - {"id": 1, "tag_0": 1, "tag_1": 2, "tag_2": None, "tag_3": 4}, - {"id": 2, "tag_0": None, "tag_1": None, "tag_2": None, "tag_3": None}, - ] - ) + result_table = Table([ + 
{'id': 1, 'tag_0': 1, 'tag_1': 2, 'tag_2': None, 'tag_3': 4}, + {'id': 2, 'tag_0': None, 'tag_1': None, 'tag_2': None, 'tag_3': None}]) # Check that the values for both rows are distributed correctly self.assertEqual( result_table.data[0] + result_table.data[1], - tbl_unpacked.data[0] + tbl_unpacked.data[1], - ) + tbl_unpacked.data[0] + tbl_unpacked.data[1]) def test_unpack_nested_columns_as_rows(self): # A Table with mixed content - test_table = Table( - [ - {"id": 1, "nested": {"A": 1, "B": 2, "C": 3}, "extra": "hi"}, - {"id": 2, "nested": {"A": 4, "B": 5, "I": 6}, "extra": "hi"}, - {"id": 3, "nested": "string!", "extra": "hi"}, - {"id": 4, "nested": None, "extra": "hi"}, - {"id": 5, "nested": ["this!", "is!", "a!", "list!"], "extra": "hi"}, - ] - ) - - standalone = test_table.unpack_nested_columns_as_rows("nested") + test_table = Table([ + {'id': 1, 'nested': {'A': 1, 'B': 2, 'C': 3}, 'extra': 'hi'}, + {'id': 2, 'nested': {'A': 4, 'B': 5, 'I': 6}, 'extra': 'hi'}, + {'id': 3, 'nested': 'string!', 'extra': 'hi'}, + {'id': 4, 'nested': None, 'extra': 'hi'}, + {'id': 5, 'nested': ['this!', 'is!', 'a!', 'list!'], 'extra': 'hi'} + ]) + + standalone = test_table.unpack_nested_columns_as_rows('nested') # Check that the columns are as expected - self.assertEqual(["uid", "id", "nested", "value"], standalone.columns) + self.assertEqual(['uid', 'id', 'nested', 'value'], standalone.columns) # Check that the row count is as expected self.assertEqual(standalone.num_rows, 11) # Check that the uids are unique, indicating that each row is unique - self.assertEqual(len({row["uid"] for row in standalone}), 11) + self.assertEqual(len({row['uid'] for row in standalone}), 11) def test_unpack_nested_columns_as_rows_expanded(self): - test_table = Table( - [ - {"id": 1, "nested": {"A": 1, "B": 2, "C": 3}, "extra": "hi"}, - {"id": 2, "nested": {"A": 4, "B": 5, "I": 6}, "extra": "hi"}, - {"id": 3, "nested": "string!", "extra": "hi"}, - {"id": 4, "nested": None, "extra": "hi"}, - {"id": 5, "nested": ["this!", "is!", "a!", "list!"], "extra": "hi"}, - ] - ) + test_table = Table([ + {'id': 1, 'nested': {'A': 1, 'B': 2, 'C': 3}, 'extra': 'hi'}, + {'id': 2, 'nested': {'A': 4, 'B': 5, 'I': 6}, 'extra': 'hi'}, + {'id': 3, 'nested': 'string!', 'extra': 'hi'}, + {'id': 4, 'nested': None, 'extra': 'hi'}, + {'id': 5, 'nested': ['this!', 'is!', 'a!', 'list!'], 'extra': 'hi'} + ]) - expanded = test_table.unpack_nested_columns_as_rows( - "nested", expand_original=True - ) + expanded = test_table.unpack_nested_columns_as_rows('nested', expand_original=True) # Check that the columns are as expected - self.assertEqual( - ["uid", "id", "extra", "nested", "nested_value"], expanded.columns - ) + self.assertEqual(['uid', 'id', 'extra', 'nested', 'nested_value'], expanded.columns) # Check that the row count is as expected self.assertEqual(expanded.num_rows, 12) # Check that the uids are unique, indicating that each row is unique - self.assertEqual(len({row["uid"] for row in expanded}), 12) + self.assertEqual(len({row['uid'] for row in expanded}), 12) def test_cut(self): # Test that the cut works correctly - cut_tbl = self.tbl.cut("first") - self.assertEqual(cut_tbl.columns, ["first"]) + cut_tbl = self.tbl.cut('first') + self.assertEqual(cut_tbl.columns, ['first']) def test_row_select(self): - tbl = Table( - [["foo", "bar", "baz"], ["c", 4, 9.3], ["a", 2, 88.2], ["b", 1, 23.3]] - ) - expected = Table([{"foo": "a", "bar": 2, "baz": 88.2}]) + tbl = Table([['foo', 'bar', 'baz'], + ['c', 4, 9.3], + ['a', 2, 88.2], + ['b', 1, 23.3]]) + 
expected = Table([{'foo': 'a', 'bar': 2, 'baz': 88.2}]) # Try with this method select_tbl = tbl.select_rows("{foo} == 'a' and {baz} > 88.1") self.assertEqual(select_tbl.data[0], expected.data[0]) # And try with this method - select_tbl2 = tbl.select_rows(lambda row: row.foo == "a" and row.baz > 88.1) + select_tbl2 = tbl.select_rows(lambda row: row.foo == 'a' and row.baz > 88.1) self.assertEqual(select_tbl2.data[0], expected.data[0]) def test_remove_null_rows(self): # Test that null rows are removed from a single column - null_table = Table([{"a": 1, "b": 2}, {"a": 1, "b": None}]) - self.assertEqual(null_table.remove_null_rows("b").num_rows, 1) + null_table = Table([{'a': 1, 'b': 2}, {'a': 1, 'b': None}]) + self.assertEqual(null_table.remove_null_rows('b').num_rows, 1) # Test that null rows are removed from multiple columns - null_table = Table([{"a": 1, "b": 2, "c": 3}, {"a": 1, "b": None, "c": 3}]) - self.assertEqual(null_table.remove_null_rows(["b", "c"]).num_rows, 1) + null_table = Table([{'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': None, 'c': 3}]) + self.assertEqual(null_table.remove_null_rows(['b', 'c']).num_rows, 1) def test_long_table(self): # Create a long table that is 4 rows long - tbl = Table([{"id": 1, "tag": [1, 2, 3, 4]}]) - self.assertEqual(tbl.long_table(["id"], "tag").num_rows, 4) + tbl = Table([{'id': 1, 'tag': [1, 2, 3, 4]}]) + self.assertEqual(tbl.long_table(['id'], 'tag').num_rows, 4) # Assert that column has been dropped - self.assertEqual(tbl.columns, ["id"]) + self.assertEqual(tbl.columns, ['id']) # Assert that column has been retained - tbl_keep = Table([{"id": 1, "tag": [1, 2, 3, 4]}]) - tbl_keep.long_table(["id"], "tag", retain_original=True) - self.assertEqual(tbl_keep.columns, ["id", "tag"]) + tbl_keep = Table([{'id': 1, 'tag': [1, 2, 3, 4]}]) + tbl_keep.long_table(['id'], 'tag', retain_original=True) + self.assertEqual(tbl_keep.columns, ['id', 'tag']) def test_long_table_with_na(self): # Create a long table that is 4 rows long - tbl = Table([{"id": 1, "tag": [1, 2, 3, 4]}, {"id": 2, "tag": None}]) - self.assertEqual(tbl.long_table(["id"], "tag").num_rows, 4) + tbl = Table([{'id': 1, 'tag': [1, 2, 3, 4]}, {'id': 2, 'tag': None}]) + self.assertEqual(tbl.long_table(['id'], 'tag').num_rows, 4) # Assert that column has been dropped - self.assertEqual(tbl.columns, ["id"]) + self.assertEqual(tbl.columns, ['id']) # Assert that column has been retained - tbl_keep = Table([{"id": 1, "tag": [1, 2, 3, 4]}, {"id": 2, "tag": None}]) - tbl_keep.long_table(["id"], "tag", retain_original=True) - self.assertEqual(tbl_keep.columns, ["id", "tag"]) + tbl_keep = Table([{'id': 1, 'tag': [1, 2, 3, 4]}, {'id': 2, 'tag': None}]) + tbl_keep.long_table(['id'], 'tag', retain_original=True) + self.assertEqual(tbl_keep.columns, ['id', 'tag']) def test_rows(self): # Test that there is only one row in the table @@ -566,7 +539,7 @@ def test_rows(self): def test_first(self): # Test that the first value in the table is returned.
- self.assertEqual(self.tbl.first, "Bob") + self.assertEqual(self.tbl.first, 'Bob') # Test empty value returns None empty_tbl = Table([[1], [], [3]]) @@ -578,10 +551,10 @@ def test_get_item(self): # Test a valid column tbl = Table(self.lst) lst = [1, 4, 7, 10, 13] - self.assertEqual(tbl["a"], lst) + self.assertEqual(tbl['a'], lst) # Test a valid row - row = {"a": 4, "b": 5, "c": 6} + row = {'a': 4, 'b': 5, 'c': 6} self.assertEqual(tbl[1], row) def test_column_data(self): @@ -590,23 +563,23 @@ def test_column_data(self): # Test a valid column tbl = Table(self.lst) lst = [1, 4, 7, 10, 13] - self.assertEqual(tbl.column_data("a"), lst) + self.assertEqual(tbl.column_data('a'), lst) # Test an invalid column - self.assertRaises(TypeError, tbl["c"]) + self.assertRaises(TypeError, tbl['c']) def test_row_data(self): # Test a valid column tbl = Table(self.lst) - row = {"a": 4, "b": 5, "c": 6} + row = {'a': 4, 'b': 5, 'c': 6} self.assertEqual(tbl.row_data(1), row) def test_stack(self): tbl1 = self.tbl.select_rows(lambda x: x) - tbl2 = Table([{"first": "Mary", "last": "Nichols"}]) + tbl2 = Table([{'first': 'Mary', 'last': 'Nichols'}]) # Different column names shouldn't matter for stack() - tbl3 = Table([{"f": "Lucy", "l": "Peterson"}]) + tbl3 = Table([{'f': 'Lucy', 'l': 'Peterson'}]) tbl1.stack(tbl2, tbl3) expected_tbl = Table(petl.stack(self.tbl.table, tbl2.table, tbl3.table)) @@ -614,8 +587,8 @@ def test_stack(self): def test_concat(self): tbl1 = self.tbl.select_rows(lambda x: x) - tbl2 = Table([{"first": "Mary", "last": "Nichols"}]) - tbl3 = Table([{"first": "Lucy", "last": "Peterson"}]) + tbl2 = Table([{'first': 'Mary', 'last': 'Nichols'}]) + tbl3 = Table([{'first': 'Lucy', 'last': 'Peterson'}]) tbl1.concat(tbl2, tbl3) expected_tbl = Table(petl.cat(self.tbl.table, tbl2.table, tbl3.table)) @@ -635,14 +608,14 @@ def test_chunk(self): def test_match_columns(self): raw = [ - {"first name": "Mary", "LASTNAME": "Nichols", "Middle__Name": "D"}, - {"first name": "Lucy", "LASTNAME": "Peterson", "Middle__Name": "S"}, + {'first name': 'Mary', 'LASTNAME': 'Nichols', 'Middle__Name': 'D'}, + {'first name': 'Lucy', 'LASTNAME': 'Peterson', 'Middle__Name': 'S'}, ] tbl = Table(raw) desired_raw = [ - {"first_name": "Mary", "middle_name": "D", "last_name": "Nichols"}, - {"first_name": "Lucy", "middle_name": "S", "last_name": "Peterson"}, + {'first_name': 'Mary', 'middle_name': 'D', 'last_name': 'Nichols'}, + {'first_name': 'Lucy', 'middle_name': 'S', 'last_name': 'Peterson'}, ] desired_tbl = Table(desired_raw) @@ -656,8 +629,7 @@ def test_match_columns(self): Table(raw).match_columns, desired_tbl.columns, fuzzy_match=False, - if_missing_columns="fail", - ) + if_missing_columns='fail') # Test disable fuzzy matching, and fail due to the extra cols self.assertRaises( @@ -665,95 +637,56 @@ def test_match_columns(self): Table(raw).match_columns, desired_tbl.columns, fuzzy_match=False, - if_extra_columns="fail", - ) + if_extra_columns='fail') # Test table that already has the right columns, shouldn't need fuzzy match tbl = Table(desired_raw) tbl.match_columns( desired_tbl.columns, fuzzy_match=False, - if_missing_columns="fail", - if_extra_columns="fail", - ) + if_missing_columns='fail', + if_extra_columns='fail') assert_matching_tables(desired_tbl, tbl) # Test table with missing col, verify the missing col gets added by default - tbl = Table( - [ - {"first name": "Mary", "LASTNAME": "Nichols"}, - {"first name": "Lucy", "LASTNAME": "Peterson"}, - ] - ) + tbl = Table([ + {'first name': 'Mary', 'LASTNAME': 'Nichols'}, + 
{'first name': 'Lucy', 'LASTNAME': 'Peterson'}, + ]) tbl.match_columns(desired_tbl.columns) desired_tbl = ( - Table(desired_raw) - .remove_column("middle_name") - .add_column("middle_name", index=1) - ) + Table(desired_raw).remove_column('middle_name').add_column('middle_name', index=1)) assert_matching_tables(desired_tbl, tbl) # Test table with extra col, verify the extra col gets removed by default - tbl = Table( - [ - { - "first name": "Mary", - "LASTNAME": "Nichols", - "Age": 32, - "Middle__Name": "D", - }, - { - "first name": "Lucy", - "LASTNAME": "Peterson", - "Age": 26, - "Middle__Name": "S", - }, - ] - ) + tbl = Table([ + {'first name': 'Mary', 'LASTNAME': 'Nichols', 'Age': 32, 'Middle__Name': 'D'}, + {'first name': 'Lucy', 'LASTNAME': 'Peterson', 'Age': 26, 'Middle__Name': 'S'}, + ]) desired_tbl = Table(desired_raw) tbl.match_columns(desired_tbl.columns) assert_matching_tables(desired_tbl, tbl) # Test table with two columns that normalize the same and aren't in desired cols, verify # they both get removed. - tbl = Table( - [ - { - "first name": "Mary", - "LASTNAME": "Nichols", - "Age": 32, - "Middle__Name": "D", - "AGE": None, - }, - { - "first name": "Lucy", - "LASTNAME": "Peterson", - "Age": 26, - "Middle__Name": "S", - "AGE": None, - }, - ] - ) + tbl = Table([ + { + 'first name': 'Mary', 'LASTNAME': 'Nichols', + 'Age': 32, 'Middle__Name': 'D', 'AGE': None + }, + { + 'first name': 'Lucy', 'LASTNAME': 'Peterson', + 'Age': 26, 'Middle__Name': 'S', 'AGE': None + }, + ]) tbl.match_columns(desired_tbl.columns) assert_matching_tables(desired_tbl, tbl) # Test table with two columns that match desired cols, verify only the first gets kept. - tbl = Table( - [ - { - "first name": "Mary", - "LASTNAME": "Nichols", - "First Name": None, - "Middle__Name": "D", - }, - { - "first name": "Lucy", - "LASTNAME": "Peterson", - "First Name": None, - "Middle__Name": "S", - }, - ] - ) + tbl = Table([ + {'first name': 'Mary', 'LASTNAME': 'Nichols', 'First Name': None, 'Middle__Name': 'D'}, + {'first name': 'Lucy', 'LASTNAME': 'Peterson', 'First Name': None, 'Middle__Name': 'S'}, + ]) tbl.match_columns(desired_tbl.columns) assert_matching_tables(desired_tbl, tbl) @@ -763,209 +696,131 @@ def test_to_dicts(self): def test_reduce_rows(self): table = [ - ["foo", "bar"], - ["a", 3], - ["a", 7], - ["b", 2], - ["b", 1], - ["b", 9], - ["c", 4], - ] + ['foo', 'bar'], + ['a', 3], + ['a', 7], + ['b', 2], + ['b', 1], + ['b', 9], + ['c', 4]] expected = [ {"foo": "a", "barsum": 10}, {"foo": "b", "barsum": 12}, - {"foo": "c", "barsum": 4}, - ] + {"foo": "c", "barsum": 4}] ptable = Table(table) ptable.reduce_rows( - "foo", + 'foo', lambda key, rows: [key, sum(row[1] for row in rows)], - ["foo", "barsum"], - ) + ['foo', 'barsum']) self.assertEqual(expected, ptable.to_dicts()) def test_map_columns_exact(self): - input_tbl = Table([["fn", "ln", "MID"], ["J", "B", "H"]]) + input_tbl = Table([['fn', 'ln', 'MID'], ['J', 'B', 'H']]) - column_map = { - "first_name": ["fn", "first"], - "last_name": ["last", "ln"], - "middle_name": ["mi"], - } + column_map = {'first_name': ['fn', 'first'], + 'last_name': ['last', 'ln'], + 'middle_name': ['mi']} - exact_tbl = Table([["first_name", "last_name", "MID"], ["J", "B", "H"]]) + exact_tbl = Table([['first_name', 'last_name', 'MID'], + ['J', 'B', 'H']]) input_tbl.map_columns(column_map) assert_matching_tables(input_tbl, exact_tbl) def test_map_columns_fuzzy(self): - input_tbl = Table([["fn", "ln", "Mi_"], ["J", "B", "H"]]) + input_tbl = Table([['fn', 'ln', 'Mi_'], ['J', 'B', 'H']]) - 
column_map = { - "first_name": ["fn", "first"], - "last_name": ["last", "ln"], - "middle_name": ["mi"], - } + column_map = {'first_name': ['fn', 'first'], + 'last_name': ['last', 'ln'], + 'middle_name': ['mi']} - fuzzy_tbl = Table([["first_name", "last_name", "middle_name"], ["J", "B", "H"]]) + fuzzy_tbl = Table([['first_name', 'last_name', 'middle_name'], + ['J', 'B', 'H']]) input_tbl.map_columns(column_map, exact_match=False) assert_matching_tables(input_tbl, fuzzy_tbl) def test_get_column_max_with(self): - tbl = Table( - [ - ["a", "b", "c"], - ["wide_text", False, "slightly longer text"], - ["text", 2, "byte_text🏽‍⚕️✊🏽🤩"], - ] - ) + tbl = Table([ + ['a', 'b', 'c'], + ['wide_text', False, 'slightly longer text'], + ['text', 2, 'byte_text🏽‍⚕️✊🏽🤩'] + ]) # Basic test - self.assertEqual(tbl.get_column_max_width("a"), 9) + self.assertEqual(tbl.get_column_max_width('a'), 9) # Doesn't break for non-strings - self.assertEqual(tbl.get_column_max_width("b"), 5) + self.assertEqual(tbl.get_column_max_width('b'), 5) # Evaluates based on byte length rather than char length - self.assertEqual(tbl.get_column_max_width("c"), 33) + self.assertEqual(tbl.get_column_max_width('c'), 33) def test_sort(self): # Test basic sort - unsorted_tbl = Table([["a", "b"], [3, 1], [2, 2], [1, 3]]) + unsorted_tbl = Table([['a', 'b'], [3, 1], [2, 2], [1, 3]]) sorted_tbl = unsorted_tbl.sort() - self.assertEqual(sorted_tbl[0], {"a": 1, "b": 3}) + self.assertEqual(sorted_tbl[0], {'a': 1, 'b': 3}) # Test column sort - unsorted_tbl = Table([["a", "b"], [3, 1], [2, 2], [1, 3]]) - sorted_tbl = unsorted_tbl.sort("b") - self.assertEqual(sorted_tbl[0], {"a": 3, "b": 1}) + unsorted_tbl = Table([['a', 'b'], [3, 1], [2, 2], [1, 3]]) + sorted_tbl = unsorted_tbl.sort('b') + self.assertEqual(sorted_tbl[0], {'a': 3, 'b': 1}) # Test reverse sort - unsorted_tbl = Table([["a", "b"], [3, 1], [2, 2], [1, 3]]) + unsorted_tbl = Table([['a', 'b'], [3, 1], [2, 2], [1, 3]]) sorted_tbl = unsorted_tbl.sort(reverse=True) - self.assertEqual(sorted_tbl[0], {"a": 3, "b": 1}) + self.assertEqual(sorted_tbl[0], {'a': 3, 'b': 1}) def test_set_header(self): # Rename columns - tbl = Table([["one", "two"], [1, 2], [3, 4]]) - new_tbl = tbl.set_header(["oneone", "twotwo"]) + tbl = Table([['one', 'two'], [1, 2], [3, 4]]) + new_tbl = tbl.set_header(['oneone', 'twotwo']) - self.assertEqual(new_tbl[0], {"oneone": 1, "twotwo": 2}) + self.assertEqual(new_tbl[0], {'oneone': 1, 'twotwo': 2}) # Change number of columns - tbl = Table([["one", "two"], [1, 2], [3, 4]]) - new_tbl = tbl.set_header(["one"]) + tbl = Table([['one', 'two'], [1, 2], [3, 4]]) + new_tbl = tbl.set_header(['one']) - self.assertEqual(new_tbl[0], {"one": 1}) + self.assertEqual(new_tbl[0], {'one': 1}) def test_bool(self): empty = Table() - not_empty = Table([{"one": 1, "two": 2}]) + not_empty = Table([{'one': 1, 'two': 2}]) self.assertEqual(not empty, True) self.assertEqual(not not_empty, False) def test_use_petl(self): # confirm that this method doesn't exist for parsons.Table - self.assertRaises(AttributeError, getattr, Table, "skipcomments") - - tbl = Table( - [ - ["col1", "col2"], - [ - "# this is a comment row", - ], - ["a", 1], - ["#this is another comment", "this is also ignored"], - ["b", 2], - ] - ) - tbl_expected = Table([["col1", "col2"], ["a", 1], ["b", 2]]) - - tbl_after = tbl.use_petl("skipcomments", "#") + self.assertRaises(AttributeError, getattr, Table, 'skipcomments') + + tbl = Table([ + ['col1', 'col2'], + ['# this is a comment row', ], + ['a', 1], + ['#this is another comment', 'this is 
also ignored'], + ['b', 2] + ]) + tbl_expected = Table([ + ['col1', 'col2'], + ['a', 1], + ['b', 2] + ]) + + tbl_after = tbl.use_petl('skipcomments', '#') assert_matching_tables(tbl_expected, tbl_after) - tbl.use_petl("skipcomments", "#", update_table=True) + tbl.use_petl('skipcomments', '#', update_table=True) assert_matching_tables(tbl_expected, tbl) from petl.util.base import Table as PetlTable - - tbl_petl = tbl.use_petl("skipcomments", "#", to_petl=True) + tbl_petl = tbl.use_petl('skipcomments', '#', to_petl=True) self.assertIsInstance(tbl_petl, PetlTable) - - def test_deduplicate(self): - # Confirm deduplicate works with no keys for one-column duplicates - tbl = Table([["a"], [1], [2], [2], [3]]) - tbl_expected = Table([["a"], [1], [2], [3]]) - tbl.deduplicate() - assert_matching_tables(tbl_expected, tbl) - - # Confirm deduplicate works with no keys for multiple columns - tbl = Table([["a", "b"], [1, 2], [1, 2], [1, 3], [2, 3]]) - tbl_expected = Table( - [ - ["a", "b"], - [1, 2], - [1, 3], - [2, 3], - ] - ) - tbl.deduplicate() - assert_matching_tables(tbl_expected, tbl) - - # Confirm deduplicate works with one key for multiple columns - tbl = Table([["a", "b"], [1, 3], [1, 2], [1, 2], [2, 3]]) - tbl_expected = Table( - [ - ["a", "b"], - [1, 3], - [2, 3], - ] - ) - tbl.deduplicate(keys=["a"]) - assert_matching_tables(tbl_expected, tbl) - - # Confirm sorting deduplicate works with one key for multiple columns - - # Note that petl sorts on the column(s) you're deduping on - # Meaning it will ignore the 'b' column below - # That is, the first row, [1,3], - # would not get moved to after the [1,2] - tbl = Table([["a", "b"], [2, 3], [1, 3], [1, 2], [1, 2]]) - tbl_expected = Table( - [ - ["a", "b"], - [1, 3], - [2, 3], - ] - ) - tbl.deduplicate(keys=["a"], presorted=False) - assert_matching_tables(tbl_expected, tbl) - - # Confirm sorting deduplicate works for two of two columns - tbl = Table([["a", "b"], [2, 3], [1, 3], [1, 2], [1, 2]]) - tbl_expected = Table( - [ - ["a", "b"], - [1, 2], - [1, 3], - [2, 3], - ] - ) - tbl.deduplicate(keys=["a", "b"], presorted=False) - assert_matching_tables(tbl_expected, tbl) - - # Confirm deduplicate works for multiple keys - tbl = Table( - [["a", "b", "c"], [1, 2, 3], [1, 2, 3], [1, 2, 4], [1, 3, 2], [2, 3, 4]] - ) - tbl_expected = Table([["a", "b", "c"], [1, 2, 3], [1, 3, 2], [2, 3, 4]]) - tbl.deduplicate(["a", "b"]) - assert_matching_tables(tbl_expected, tbl) diff --git a/test/test_facebook_ads.py b/test/test_facebook_ads.py index 7a0fe828a5..cec7668579 100644 --- a/test/test_facebook_ads.py +++ b/test/test_facebook_ads.py @@ -1,42 +1,28 @@ import unittest import os -from parsons import FacebookAds, Table - - -users_table = Table( - [ - { - "first": "Bob", - "middle": "J", - "last": "Smith", - "phone": "1234567890", - "cell": None, - "vb_voterbase_dob": "19820413", - }, - { - "first": "Sue", - "middle": "Lucy", - "last": "Doe", - "phone": None, - "cell": "2345678901", - "vb_voterbase_dob": None, - }, - ] -) - - -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +from parsons.facebook_ads.facebook_ads import FacebookAds +from parsons.etl.table import Table + +users_table = Table([ + {"first": "Bob", "middle": "J", "last": "Smith", "phone": "1234567890", "cell": None, + "vb_voterbase_dob": "19820413"}, + {"first": "Sue", "middle": "Lucy", "last": "Doe", "phone": None, "cell": "2345678901", + "vb_voterbase_dob": None}, +]) + + +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running 
live test') class TestFacebookAdsIntegration(unittest.TestCase): + def setUp(self): self.fb_ads = FacebookAds() self.audience_id = self.fb_ads.create_custom_audience( - name="Test Audience", data_source="USER_PROVIDED_ONLY" - ) + name="Test Audience", + data_source="USER_PROVIDED_ONLY" + ) def tearDown(self): self.fb_ads.delete_custom_audience(self.audience_id) @@ -48,10 +34,8 @@ def test_create_custom_audience(self): def test_create_custom_audience_bad_data_source(self): self.assertRaises( KeyError, - self.fb_ads.create_custom_audience, - name="Something", - data_source="INVALID", - ) + self.fb_ads.create_custom_audience, name="Something", data_source="INVALID" + ) def test_add_users_to_custom_audience(self): # Note we don't actually check the results of adding these users, eg. how many were @@ -61,47 +45,35 @@ def test_add_users_to_custom_audience(self): def test_add_users_to_custom_audience_no_valid_columns(self): # We don't yet support full names for matching, so this shouldn't work - tbl = Table( - [ - {"full name": "Bob Smith"}, - ] - ) + tbl = Table([ + {"full name": "Bob Smith"}, + ]) self.assertRaises( - KeyError, self.fb_ads.add_users_to_custom_audience, self.audience_id, tbl - ) + KeyError, + self.fb_ads.add_users_to_custom_audience, self.audience_id, tbl + ) class TestFacebookAdsUtilities(unittest.TestCase): + def test_get_match_key_for_column(self): # Test just a few of the mappings - self.assertEqual("EMAIL", FacebookAds._get_match_key_for_column("email")) - self.assertEqual( - "EMAIL", FacebookAds._get_match_key_for_column("voterbase_email") - ) - self.assertEqual("FN", FacebookAds._get_match_key_for_column("first name")) - self.assertEqual("FN", FacebookAds._get_match_key_for_column("FIRST-NAME ")) - self.assertEqual( - "FN", FacebookAds._get_match_key_for_column("vb_tsmart_first_name") - ) - self.assertEqual("LN", FacebookAds._get_match_key_for_column("Last Name!")) - self.assertEqual("ST", FacebookAds._get_match_key_for_column("state code")) - self.assertEqual( - "ST", FacebookAds._get_match_key_for_column("vb_vf_source_state") - ) - self.assertEqual( - "GEN", FacebookAds._get_match_key_for_column("vb_voterbase_gender") - ) + self.assertEqual('EMAIL', FacebookAds._get_match_key_for_column('email')) + self.assertEqual('EMAIL', FacebookAds._get_match_key_for_column('voterbase_email')) + self.assertEqual('FN', FacebookAds._get_match_key_for_column('first name')) + self.assertEqual('FN', FacebookAds._get_match_key_for_column('FIRST-NAME ')) + self.assertEqual('FN', FacebookAds._get_match_key_for_column('vb_tsmart_first_name')) + self.assertEqual('LN', FacebookAds._get_match_key_for_column('Last Name!')) + self.assertEqual('ST', FacebookAds._get_match_key_for_column('state code')) + self.assertEqual('ST', FacebookAds._get_match_key_for_column('vb_vf_source_state')) + self.assertEqual('GEN', FacebookAds._get_match_key_for_column('vb_voterbase_gender')) self.assertEqual( - "PHONE", - FacebookAds._get_match_key_for_column("vb_voterbase_phone_wireless"), - ) - self.assertIsNone(FacebookAds._get_match_key_for_column("invalid")) + 'PHONE', FacebookAds._get_match_key_for_column('vb_voterbase_phone_wireless')) + self.assertIsNone(FacebookAds._get_match_key_for_column('invalid')) def test_get_preprocess_key_for_column(self): self.assertEqual( - "DOB YYYYMMDD", - FacebookAds._get_preprocess_key_for_column("vb_voterbase_dob"), - ) + 'DOB YYYYMMDD', FacebookAds._get_preprocess_key_for_column('vb_voterbase_dob')) def test_get_match_table_for_users_table(self): # This tests basic column 
matching, as well as the more complex cases like: @@ -127,12 +99,10 @@ def test_get_match_table_for_users_table(self): self.assertEqual("", row1["DOBD"]) def test_get_match_schema_and_data(self): - match_table = Table( - [ - {"FN": "Bob", "LN": "Smith"}, - {"FN": "Sue", "LN": "Doe"}, - ] - ) + match_table = Table([ + {"FN": "Bob", "LN": "Smith"}, + {"FN": "Sue", "LN": "Doe"}, + ]) (schema, data) = FacebookAds._get_match_schema_and_data(match_table) self.assertEqual(["FN", "LN"], schema) self.assertEqual(("Bob", "Smith"), data[0]) diff --git a/test/test_freshdesk/expected_json.py b/test/test_freshdesk/expected_json.py index 202c25afaa..02927cac7b 100644 --- a/test/test_freshdesk/expected_json.py +++ b/test/test_freshdesk/expected_json.py @@ -1,99 +1,92 @@ -test_agent = [ - { - "available": False, - "occasional": True, - "id": 47020956237, - "signature": '
\n
', - "ticket_scope": 1, - "created_at": "2020-01-17T15:07:01Z", - "updated_at": "2020-02-05T00:58:37Z", - "available_since": None, - "type": "support_agent", - "contact": { - "active": True, - "email": "person@email.org", - "job_title": None, - "language": "en", - "last_login_at": "2020-01-24T22:49:52Z", - "mobile": None, - "name": "Alissa", - "phone": None, - "time_zone": "Bogota", - "created_at": "2020-01-17T15:07:00Z", - "updated_at": "2020-01-24T22:49:52Z", - }, - } -] -test_ticket = [ - { - "cc_emails": ["person@email.org", "person2@email.org"], - "fwd_emails": [], - "reply_cc_emails": ["person@email.org", "person2@email.org"], - "ticket_cc_emails": ["person@email.org", "person2@email.org"], - "fr_escalated": False, - "spam": False, - "email_config_id": None, - "group_id": 47000643034, - "priority": 1, - "requester_id": 47021937449, - "responder_id": 47017224681, - "source": 3, - "company_id": 47000491688, - "status": 5, - "subject": "My thing is broken.", - "association_type": None, - "to_emails": None, - "product_id": None, - "id": 84, - "type": "Support Request 1", - "due_by": "2020-02-19T22:00:00Z", - "fr_due_by": "2020-02-06T17:00:00Z", - "is_escalated": False, - "custom_fields": {}, - "created_at": "2020-02-05T22:17:41Z", - "updated_at": "2020-02-06T02:07:37Z", - "associated_tickets_count": None, - "tags": [], - } -] +test_agent = [{ + 'available': False, + 'occasional': True, + 'id': 47020956237, + 'signature': '
\n
', + 'ticket_scope': 1, + 'created_at': '2020-01-17T15:07:01Z', + 'updated_at': '2020-02-05T00:58:37Z', + 'available_since': None, + 'type': 'support_agent', + 'contact': { + 'active': True, + 'email': 'person@email.org', + 'job_title': None, + 'language': 'en', + 'last_login_at': '2020-01-24T22:49:52Z', + 'mobile': None, + 'name': 'Alissa', + 'phone': None, + 'time_zone': 'Bogota', + 'created_at': '2020-01-17T15:07:00Z', + 'updated_at': '2020-01-24T22:49:52Z' + } +}] -test_company = [ - { - "id": 47000491701, - "name": "Big Org", - "description": None, - "note": None, - "domains": [], - "created_at": "2020-01-09T20:43:09Z", - "updated_at": "2020-01-09T20:43:09Z", - "custom_fields": {}, - "health_score": None, - "account_tier": "Tier 2", - "renewal_date": "2020-12-31T00:00:00Z", - "industry": None, - } -] +test_ticket = [{ + 'cc_emails': ['person@email.org', 'person2@email.org'], + 'fwd_emails': [], + 'reply_cc_emails': ['person@email.org', 'person2@email.org'], + 'ticket_cc_emails': ['person@email.org', 'person2@email.org'], + 'fr_escalated': False, + 'spam': False, + 'email_config_id': None, + 'group_id': 47000643034, + 'priority': 1, + 'requester_id': 47021937449, + 'responder_id': 47017224681, + 'source': 3, + 'company_id': 47000491688, + 'status': 5, + 'subject': 'My thing is broken.', + 'association_type': None, + 'to_emails': None, + 'product_id': None, + 'id': 84, + 'type': 'Support Request 1', + 'due_by': '2020-02-19T22:00:00Z', + 'fr_due_by': '2020-02-06T17:00:00Z', + 'is_escalated': False, + 'custom_fields': {}, + 'created_at': '2020-02-05T22:17:41Z', + 'updated_at': '2020-02-06T02:07:37Z', + 'associated_tickets_count': None, + 'tags': [] +}] -test_contact = [ - { - "active": False, - "address": None, - "company_id": 47000491686, - "description": None, - "email": "person@email.org", - "id": 47021299020, - "job_title": None, - "language": "en", - "mobile": None, - "name": "Percy Person", - "phone": "N/A", - "time_zone": "Bogota", - "twitter_id": None, - "custom_fields": {}, - "facebook_id": None, - "created_at": "2020-01-27T16:44:34Z", - "updated_at": "2020-01-27T16:44:34Z", - "unique_external_id": None, - } -] +test_company = [{ + 'id': 47000491701, + 'name': 'Big Org', + 'description': None, + 'note': None, + 'domains': [], + 'created_at': '2020-01-09T20:43:09Z', + 'updated_at': '2020-01-09T20:43:09Z', + 'custom_fields': {}, + 'health_score': None, + 'account_tier': 'Tier 2', + 'renewal_date': '2020-12-31T00:00:00Z', + 'industry': None +}] + +test_contact = [{ + 'active': False, + 'address': None, + 'company_id': 47000491686, + 'description': None, + 'email': 'person@email.org', + 'id': 47021299020, + 'job_title': None, + 'language': 'en', + 'mobile': None, + 'name': 'Percy Person', + 'phone': 'N/A', + 'time_zone': 'Bogota', + 'twitter_id': None, + 'custom_fields': {}, + 'facebook_id': None, + 'created_at': '2020-01-27T16:44:34Z', + 'updated_at': '2020-01-27T16:44:34Z', + 'unique_external_id': None +}] diff --git a/test/test_freshdesk/test_freshdesk.py b/test/test_freshdesk/test_freshdesk.py index 304979ca57..f1628fb7fa 100644 --- a/test/test_freshdesk/test_freshdesk.py +++ b/test/test_freshdesk/test_freshdesk.py @@ -1,13 +1,14 @@ -from parsons import Freshdesk +from parsons.freshdesk.freshdesk import Freshdesk import unittest import requests_mock from test.test_freshdesk import expected_json -DOMAIN = "myorg" -API_KEY = "mykey" +DOMAIN = 'myorg' +API_KEY = 'mykey' class TestFreshdesk(unittest.TestCase): + def setUp(self): self.fd = Freshdesk(DOMAIN, API_KEY) @@ -16,26 +17,26 
@@ def setUp(self): def test_get_agents(self, m): # Test that agents are returned correctly. - m.get(self.fd.uri + "agents", json=expected_json.test_agent) + m.get(self.fd.uri + 'agents', json=expected_json.test_agent) self.fd.get_agents() @requests_mock.Mocker() def test_get_tickets(self, m): # Test that tickets are returned correctly. - m.get(self.fd.uri + "tickets", json=expected_json.test_ticket) + m.get(self.fd.uri + 'tickets', json=expected_json.test_ticket) self.fd.get_tickets() @requests_mock.Mocker() def test_get_companies(self, m): # Test that companies are returned correctly. - m.get(self.fd.uri + "companies", json=expected_json.test_company) + m.get(self.fd.uri + 'companies', json=expected_json.test_company) self.fd.get_companies() @requests_mock.Mocker() def test_get_contacts(self, m): # Test that contacts are returned correctly. - m.get(self.fd.uri + "contacts", json=expected_json.test_contact) + m.get(self.fd.uri + 'contacts', json=expected_json.test_contact) self.fd.get_contacts() diff --git a/test/test_geocoder/test_census_geocoder.py b/test/test_geocoder/test_census_geocoder.py index 227aee1ec9..8d5167d116 100644 --- a/test/test_geocoder/test_census_geocoder.py +++ b/test/test_geocoder/test_census_geocoder.py @@ -1,16 +1,16 @@ import unittest import os from unittest import mock -from parsons import Table, CensusGeocoder +from parsons.etl import Table +from parsons.geocode import CensusGeocoder import petl from test_responses import geographies_resp, locations_resp, batch_resp, coord_resp from test.utils import assert_matching_tables -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestCensusGeocoder(unittest.TestCase): + def setUp(self): self.cg = CensusGeocoder() @@ -18,49 +18,46 @@ def setUp(self): def test_geocode_onelineaddress(self): self.cg.cg = mock.MagicMock() - address = "1600 Pennsylvania Avenue, Washington, DC" + address = '1600 Pennsylvania Avenue, Washington, DC' # Assert one line with geographies parameter returns expected self.cg.cg.onelineaddress = mock.MagicMock(return_value=geographies_resp) - geo = self.cg.geocode_onelineaddress(address, return_type="geographies") - self.cg.cg.onelineaddress.assert_called_with(address, returntype="geographies") + geo = self.cg.geocode_onelineaddress(address, return_type='geographies') + self.cg.cg.onelineaddress.assert_called_with(address, returntype='geographies') self.assertEqual(geo, geographies_resp) # Assert one line with locations parameter returns expected self.cg.cg.onelineaddress = mock.MagicMock(return_value=locations_resp) - geo = self.cg.geocode_onelineaddress(address, return_type="locations") - self.cg.cg.onelineaddress.assert_called_with(address, returntype="locations") + geo = self.cg.geocode_onelineaddress(address, return_type='locations') + self.cg.cg.onelineaddress.assert_called_with(address, returntype='locations') self.assertEqual(geo, locations_resp) def test_geocode_address(self): self.cg.cg = mock.MagicMock() - passed_address = { - "address_line": "1600 Pennsylvania Avenue", - "city": "Washington", - "state": "DC", - } + passed_address = {'address_line': '1600 Pennsylvania Avenue', + 'city': 'Washington', + 'state': 'DC' + } # Assert one line with geographies parameter returns expected self.cg.cg.address = mock.MagicMock(return_value=geographies_resp) - geo = self.cg.geocode_address(**passed_address, return_type="geographies") + geo = 
self.cg.geocode_address(**passed_address, return_type='geographies') self.assertEqual(geo, geographies_resp) # Assert one line with locations parameter returns expected self.cg.cg.address = mock.MagicMock(return_value=locations_resp) - geo = self.cg.geocode_address(**passed_address, return_type="locations") + geo = self.cg.geocode_address(**passed_address, return_type='locations') self.assertEqual(geo, locations_resp) def test_geocode_address_batch(self): - batch = [ - ["id", "street", "city", "state", "zip"], - ["1", "908 N Washtenaw", "Chicago", "IL", "60622"], - ["2", "1405 Wilshire Blvd", "Austin", "TX", "78722"], - ["3", "908 N Washtenaw", "Chicago", "IL", "60622"], - ["4", "1405 Wilshire Blvd", "Austin", "TX", "78722"], - ["5", "908 N Washtenaw", "Chicago", "IL", "60622"], - ] + batch = [['id', 'street', 'city', 'state', 'zip'], + ['1', '908 N Washtenaw', 'Chicago', 'IL', '60622'], + ['2', '1405 Wilshire Blvd', 'Austin', 'TX', '78722'], + ['3', '908 N Washtenaw', 'Chicago', 'IL', '60622'], + ['4', '1405 Wilshire Blvd', 'Austin', 'TX', '78722'], + ['5', '908 N Washtenaw', 'Chicago', 'IL', '60622']] tbl = Table(batch) @@ -72,5 +69,5 @@ def test_coordinates(self): # Assert coordinates data returns expected response. self.cg.cg.address = mock.MagicMock(return_value=coord_resp) - geo = self.cg.get_coordinates_data("38.8884212", "-77.0441907") + geo = self.cg.get_coordinates_data('38.8884212', '-77.0441907') self.assertEqual(geo, coord_resp) diff --git a/test/test_geocoder/test_responses.py b/test/test_geocoder/test_responses.py index 5d9ad81a4d..f2bb43ef37 100644 --- a/test/test_geocoder/test_responses.py +++ b/test/test_geocoder/test_responses.py @@ -2,482 +2,411 @@ geographies_resp = [ { - "matchedAddress": "1600 PENNSYLVANIA AVE NW, WASHINGTON, DC, 20500", - "coordinates": {"x": -77.03535, "y": 38.898754}, - "tigerLine": {"tigerLineId": "76225813", "side": "L"}, - "addressComponents": { - "fromAddress": "1600", - "toAddress": "1698", - "preQualifier": "", - "preDirection": "", - "preType": "", - "streetName": "PENNSYLVANIA", - "suffixType": "AVE", - "suffixDirection": "NW", - "suffixQualifier": "", - "city": "WASHINGTON", - "state": "DC", - "zip": "20500", - }, - "geographies": { - "2010 Census Blocks": [ - { - "SUFFIX": "", - "GEOID": "110010062021031", - "CENTLAT": "+38.8971157", - "BLOCK": "1031", - "AREAWATER": 0, - "STATE": "11", - "BASENAME": "1031", - "OID": 210403964788146, - "LSADC": "BK", - "FUNCSTAT": "S", - "INTPTLAT": "+38.8971157", - "STGEOMETRY.AREA": 151236.97, - "STGEOMETRY.LEN": 1505.6952, - "NAME": "Block 1031", - "OBJECTID": 6398361, - "TRACT": "006202", - "CENTLON": "-077.0365336", - "BLKGRP": "1", - "AREALAND": 91475, - "INTPTLON": "-077.0365336", - "MTFCC": "G5040", - "LWBLKTYP": "L", - "COUNTY": "001", - } - ], - "States": [ - { - "STATENS": "01702382", - "GEOID": "11", - "CENTLAT": "+38.9047577", - "AREAWATER": 18687196, - "BASENAME": "District of Columbia", - "STATE": "11", - "STUSAB": "DC", - "OID": 27490331294090, - "LSADC": "00", - "FUNCSTAT": "A", - "INTPTLAT": "+38.9041031", - "DIVISION": "5", - "STGEOMETRY.AREA": 292745184.0, - "STGEOMETRY.LEN": 86300.65, - "NAME": "District of Columbia", - "REGION": "3", - "OBJECTID": 54, - "CENTLON": "-077.0162863", - "AREALAND": 158340390, - "INTPTLON": "-077.0172290", - "MTFCC": "G4000", - } - ], - "Counties": [ - { - "GEOID": "11001", - "CENTLAT": "+38.9047577", - "AREAWATER": 18687196, - "BASENAME": "District of Columbia", - "STATE": "11", - "OID": 27590331264532, - "LSADC": "00", - "FUNCSTAT": "F", - "INTPTLAT": 
"+38.9041031", - "STGEOMETRY.AREA": 292745184.0, - "STGEOMETRY.LEN": 86300.65, - "NAME": "District of Columbia", - "OBJECTID": 632, - "CENTLON": "-077.0162863", - "COUNTYCC": "H6", - "COUNTYNS": "01702382", - "AREALAND": 158340390, - "INTPTLON": "-077.0172290", - "MTFCC": "G4020", - "COUNTY": "001", - } - ], - "Census Tracts": [ - { - "GEOID": "11001006202", - "CENTLAT": "+38.8801546", - "AREAWATER": 4970897, - "BASENAME": "62.02", - "STATE": "11", - "OID": 20790331304119, - "LSADC": "CT", - "FUNCSTAT": "S", - "INTPTLAT": "+38.8809933", - "STGEOMETRY.AREA": 19021758.0, - "STGEOMETRY.LEN": 34175.344, - "NAME": "Census Tract 62.02", - "OBJECTID": 47245, - "TRACT": "006202", - "CENTLON": "-077.0352173", - "AREALAND": 6539770, - "INTPTLON": "-077.0363219", - "MTFCC": "G5020", - "COUNTY": "001", - } - ], - }, - }, - { - "matchedAddress": "1600 PENNSYLVANIA AVE SE, WASHINGTON, DC, 20003", - "coordinates": {"x": -76.981895, "y": 38.87898}, - "tigerLine": {"tigerLineId": "638666807", "side": "L"}, - "addressComponents": { - "fromAddress": "1600", - "toAddress": "1698", - "preQualifier": "", - "preDirection": "", - "preType": "", - "streetName": "PENNSYLVANIA", - "suffixType": "AVE", - "suffixDirection": "SE", - "suffixQualifier": "", - "city": "WASHINGTON", - "state": "DC", - "zip": "20003", - }, - "geographies": { - "2010 Census Blocks": [ - { - "SUFFIX": "", - "GEOID": "110010068022013", - "CENTLAT": "+38.8798010", - "BLOCK": "2013", - "AREAWATER": 0, - "STATE": "11", - "BASENAME": "2013", - "OID": 210403964789891, - "LSADC": "BK", - "FUNCSTAT": "S", - "INTPTLAT": "+38.8798010", - "STGEOMETRY.AREA": 23260.527, - "STGEOMETRY.LEN": 958.01, - "NAME": "Block 2013", - "OBJECTID": 936586, - "TRACT": "006802", - "CENTLON": "-076.9828471", - "BLKGRP": "2", - "AREALAND": 14076, - "INTPTLON": "-076.9828471", - "MTFCC": "G5040", - "LWBLKTYP": "L", - "COUNTY": "001", - } - ], - "States": [ - { - "STATENS": "01702382", - "GEOID": "11", - "CENTLAT": "+38.9047577", - "AREAWATER": 18687196, - "BASENAME": "District of Columbia", - "STATE": "11", - "STUSAB": "DC", - "OID": 27490331294090, - "LSADC": "00", - "FUNCSTAT": "A", - "INTPTLAT": "+38.9041031", - "DIVISION": "5", - "STGEOMETRY.AREA": 292745184.0, - "STGEOMETRY.LEN": 86300.65, - "NAME": "District of Columbia", - "REGION": "3", - "OBJECTID": 54, - "CENTLON": "-077.0162863", - "AREALAND": 158340390, - "INTPTLON": "-077.0172290", - "MTFCC": "G4000", - } - ], - "Counties": [ - { - "GEOID": "11001", - "CENTLAT": "+38.9047577", - "AREAWATER": 18687196, - "BASENAME": "District of Columbia", - "STATE": "11", - "OID": 27590331264532, - "LSADC": "00", - "FUNCSTAT": "F", - "INTPTLAT": "+38.9041031", - "STGEOMETRY.AREA": 292745184.0, - "STGEOMETRY.LEN": 86300.65, - "NAME": "District of Columbia", - "OBJECTID": 632, - "CENTLON": "-077.0162863", - "COUNTYCC": "H6", - "COUNTYNS": "01702382", - "AREALAND": 158340390, - "INTPTLON": "-077.0172290", - "MTFCC": "G4020", - "COUNTY": "001", - } - ], - "Census Tracts": [ - { - "GEOID": "11001006802", - "CENTLAT": "+38.8832158", - "AREAWATER": 0, - "BASENAME": "68.02", - "STATE": "11", - "OID": 20790331304268, - "LSADC": "CT", - "FUNCSTAT": "S", - "INTPTLAT": "+38.8832158", - "STGEOMETRY.AREA": 462925.4, - "STGEOMETRY.LEN": 3235.1487, - "NAME": "Census Tract 68.02", - "OBJECTID": 47582, - "TRACT": "006802", - "CENTLON": "-076.9814483", - "AREALAND": 280108, - "INTPTLON": "-076.9814483", - "MTFCC": "G5020", - "COUNTY": "001", - } - ], - }, - }, + 'matchedAddress': '1600 PENNSYLVANIA AVE NW, WASHINGTON, DC, 20500', + 'coordinates': 
{ + 'x': -77.03535, + 'y': 38.898754}, + 'tigerLine': { + 'tigerLineId': '76225813', + 'side': 'L'}, + 'addressComponents': { + 'fromAddress': '1600', + 'toAddress': '1698', + 'preQualifier': '', + 'preDirection': '', + 'preType': '', + 'streetName': 'PENNSYLVANIA', + 'suffixType': 'AVE', + 'suffixDirection': 'NW', + 'suffixQualifier': '', + 'city': 'WASHINGTON', + 'state': 'DC', + 'zip': '20500'}, + 'geographies': { + '2010 Census Blocks': [{ + 'SUFFIX': '', + 'GEOID': '110010062021031', + 'CENTLAT': '+38.8971157', + 'BLOCK': '1031', + 'AREAWATER': 0, + 'STATE': '11', + 'BASENAME': '1031', + 'OID': 210403964788146, + 'LSADC': 'BK', + 'FUNCSTAT': 'S', + 'INTPTLAT': '+38.8971157', + 'STGEOMETRY.AREA': 151236.97, + 'STGEOMETRY.LEN': 1505.6952, + 'NAME': 'Block 1031', + 'OBJECTID': 6398361, + 'TRACT': '006202', + 'CENTLON': '-077.0365336', + 'BLKGRP': '1', + 'AREALAND': 91475, + 'INTPTLON': '-077.0365336', + 'MTFCC': 'G5040', + 'LWBLKTYP': 'L', + 'COUNTY': '001' + }], + 'States': [{ + 'STATENS': '01702382', + 'GEOID': '11', + 'CENTLAT': '+38.9047577', + 'AREAWATER': 18687196, + 'BASENAME': 'District of Columbia', + 'STATE': '11', + 'STUSAB': 'DC', + 'OID': 27490331294090, + 'LSADC': '00', + 'FUNCSTAT': 'A', + 'INTPTLAT': '+38.9041031', + 'DIVISION': '5', + 'STGEOMETRY.AREA': 292745184.0, + 'STGEOMETRY.LEN': 86300.65, + 'NAME': 'District of Columbia', + 'REGION': '3', + 'OBJECTID': 54, + 'CENTLON': '-077.0162863', + 'AREALAND': 158340390, + 'INTPTLON': '-077.0172290', + 'MTFCC': 'G4000' + }], + 'Counties': [{ + 'GEOID': '11001', + 'CENTLAT': '+38.9047577', + 'AREAWATER': 18687196, + 'BASENAME': 'District of Columbia', + 'STATE': '11', + 'OID': 27590331264532, + 'LSADC': '00', + 'FUNCSTAT': 'F', + 'INTPTLAT': '+38.9041031', + 'STGEOMETRY.AREA': 292745184.0, + 'STGEOMETRY.LEN': 86300.65, + 'NAME': 'District of Columbia', + 'OBJECTID': 632, + 'CENTLON': '-077.0162863', + 'COUNTYCC': 'H6', + 'COUNTYNS': '01702382', + 'AREALAND': 158340390, + 'INTPTLON': '-077.0172290', + 'MTFCC': 'G4020', + 'COUNTY': '001' + }], + 'Census Tracts': [{ + 'GEOID': '11001006202', + 'CENTLAT': '+38.8801546', + 'AREAWATER': 4970897, + 'BASENAME': '62.02', + 'STATE': '11', + 'OID': 20790331304119, + 'LSADC': 'CT', + 'FUNCSTAT': 'S', + 'INTPTLAT': '+38.8809933', + 'STGEOMETRY.AREA': 19021758.0, + 'STGEOMETRY.LEN': 34175.344, + 'NAME': 'Census Tract 62.02', + 'OBJECTID': 47245, + 'TRACT': '006202', + 'CENTLON': '-077.0352173', + 'AREALAND': 6539770, + 'INTPTLON': '-077.0363219', + 'MTFCC': 'G5020', + 'COUNTY': '001' + }] + } + }, { + 'matchedAddress': '1600 PENNSYLVANIA AVE SE, WASHINGTON, DC, 20003', + 'coordinates': { + 'x': -76.981895, + 'y': 38.87898}, + 'tigerLine': { + 'tigerLineId': '638666807', + 'side': 'L'}, + 'addressComponents': { + 'fromAddress': '1600', + 'toAddress': '1698', + 'preQualifier': '', + 'preDirection': '', + 'preType': '', + 'streetName': 'PENNSYLVANIA', + 'suffixType': 'AVE', + 'suffixDirection': 'SE', + 'suffixQualifier': '', + 'city': 'WASHINGTON', + 'state': 'DC', + 'zip': '20003'}, + 'geographies': { + '2010 Census Blocks': [{ + 'SUFFIX': '', + 'GEOID': '110010068022013', + 'CENTLAT': '+38.8798010', + 'BLOCK': '2013', + 'AREAWATER': 0, + 'STATE': '11', + 'BASENAME': '2013', + 'OID': 210403964789891, + 'LSADC': 'BK', + 'FUNCSTAT': 'S', + 'INTPTLAT': '+38.8798010', + 'STGEOMETRY.AREA': 23260.527, + 'STGEOMETRY.LEN': 958.01, + 'NAME': 'Block 2013', + 'OBJECTID': 936586, + 'TRACT': '006802', + 'CENTLON': '-076.9828471', + 'BLKGRP': '2', + 'AREALAND': 14076, + 'INTPTLON': '-076.9828471', + 
'MTFCC': 'G5040', + 'LWBLKTYP': 'L', + 'COUNTY': '001' + }], + 'States': [{ + 'STATENS': '01702382', + 'GEOID': '11', + 'CENTLAT': '+38.9047577', + 'AREAWATER': 18687196, + 'BASENAME': 'District of Columbia', + 'STATE': '11', + 'STUSAB': 'DC', + 'OID': 27490331294090, + 'LSADC': '00', + 'FUNCSTAT': 'A', + 'INTPTLAT': '+38.9041031', + 'DIVISION': '5', + 'STGEOMETRY.AREA': 292745184.0, + 'STGEOMETRY.LEN': 86300.65, + 'NAME': 'District of Columbia', + 'REGION': '3', + 'OBJECTID': 54, + 'CENTLON': '-077.0162863', + 'AREALAND': 158340390, + 'INTPTLON': '-077.0172290', + 'MTFCC': 'G4000' + }], + 'Counties': [{ + 'GEOID': '11001', + 'CENTLAT': '+38.9047577', + 'AREAWATER': 18687196, + 'BASENAME': 'District of Columbia', + 'STATE': '11', + 'OID': 27590331264532, + 'LSADC': '00', + 'FUNCSTAT': 'F', + 'INTPTLAT': '+38.9041031', + 'STGEOMETRY.AREA': 292745184.0, + 'STGEOMETRY.LEN': 86300.65, + 'NAME': 'District of Columbia', + 'OBJECTID': 632, + 'CENTLON': '-077.0162863', + 'COUNTYCC': 'H6', + 'COUNTYNS': '01702382', + 'AREALAND': 158340390, + 'INTPTLON': '-077.0172290', + 'MTFCC': 'G4020', + 'COUNTY': '001' + }], + 'Census Tracts': [{ + 'GEOID': '11001006802', + 'CENTLAT': '+38.8832158', + 'AREAWATER': 0, + 'BASENAME': '68.02', + 'STATE': '11', + 'OID': 20790331304268, + 'LSADC': 'CT', + 'FUNCSTAT': 'S', + 'INTPTLAT': '+38.8832158', + 'STGEOMETRY.AREA': 462925.4, + 'STGEOMETRY.LEN': 3235.1487, + 'NAME': 'Census Tract 68.02', + 'OBJECTID': 47582, + 'TRACT': '006802', + 'CENTLON': '-076.9814483', + 'AREALAND': 280108, + 'INTPTLON': '-076.9814483', + 'MTFCC': 'G5020', + 'COUNTY': '001' + }] + } + } ] -locations_resp = [ - { - "matchedAddress": "1600 PENNSYLVANIA AVE NW, WASHINGTON, DC, 20500", - "coordinates": {"x": -77.03535, "y": 38.898754}, - "tigerLine": {"tigerLineId": "76225813", "side": "L"}, - "addressComponents": { - "fromAddress": "1600", - "toAddress": "1698", - "preQualifier": "", - "preDirection": "", - "preType": "", - "streetName": "PENNSYLVANIA", - "suffixType": "AVE", - "suffixDirection": "NW", - "suffixQualifier": "", - "city": "WASHINGTON", - "state": "DC", - "zip": "20500", - }, - }, - { - "matchedAddress": "1600 PENNSYLVANIA AVE SE, WASHINGTON, DC, 20003", - "coordinates": {"x": -76.981895, "y": 38.87898}, - "tigerLine": {"tigerLineId": "638666807", "side": "L"}, - "addressComponents": { - "fromAddress": "1600", - "toAddress": "1698", - "preQualifier": "", - "preDirection": "", - "preType": "", - "streetName": "PENNSYLVANIA", - "suffixType": "AVE", - "suffixDirection": "SE", - "suffixQualifier": "", - "city": "WASHINGTON", - "state": "DC", - "zip": "20003", - }, - }, -] +locations_resp = [{ + 'matchedAddress': '1600 PENNSYLVANIA AVE NW, WASHINGTON, DC, 20500', + 'coordinates': { + 'x': -77.03535, + 'y': 38.898754 + }, + 'tigerLine': { + 'tigerLineId': '76225813', + 'side': 'L' + }, + 'addressComponents': { + 'fromAddress': '1600', + 'toAddress': '1698', + 'preQualifier': '', + 'preDirection': '', + 'preType': '', + 'streetName': 'PENNSYLVANIA', + 'suffixType': 'AVE', + 'suffixDirection': 'NW', + 'suffixQualifier': '', + 'city': 'WASHINGTON', + 'state': 'DC', + 'zip': '20500' + } + }, { + 'matchedAddress': '1600 PENNSYLVANIA AVE SE, WASHINGTON, DC, 20003', + 'coordinates': { + 'x': -76.981895, + 'y': 38.87898 + }, + 'tigerLine': { + 'tigerLineId': '638666807', + 'side': 'L' + }, + 'addressComponents': { + 'fromAddress': '1600', + 'toAddress': '1698', + 'preQualifier': '', + 'preDirection': '', + 'preType': '', + 'streetName': 'PENNSYLVANIA', + 'suffixType': 'AVE', + 
'suffixDirection': 'SE', + 'suffixQualifier': '', + 'city': 'WASHINGTON', + 'state': 'DC', + 'zip': '20003' + } + }] coord_resp = { - "2010 Census Blocks": [ - { - "SUFFIX": "", - "GEOID": "110010062021092", - "CENTLAT": "+38.8888686", - "BLOCK": "1092", - "AREAWATER": 30352, - "STATE": "11", - "BASENAME": "1092", - "OID": 210403964787858, - "LSADC": "BK", - "FUNCSTAT": "S", - "INTPTLAT": "+38.8886931", - "STGEOMETRY.AREA": 221344.39, - "STGEOMETRY.LEN": 2851.458, - "NAME": "Block 1092", - "OBJECTID": 1695655, - "TRACT": "006202", - "CENTLON": "-077.0444327", - "BLKGRP": "1", - "AREALAND": 103558, - "INTPTLON": "-077.0452079", - "MTFCC": "G5040", - "LWBLKTYP": "B", - "COUNTY": "001", - "CENT": (-77.0444327, 38.8888686), - "INTPT": (-77.0452079, 38.8886931), - } - ], - "States": [ - { - "STATENS": "01702382", - "GEOID": "11", - "CENTLAT": "+38.9047577", - "AREAWATER": 18687196, - "BASENAME": "District of Columbia", - "STATE": "11", - "STUSAB": "DC", - "OID": 27490331294090, - "LSADC": "00", - "FUNCSTAT": "A", - "INTPTLAT": "+38.9041031", - "DIVISION": "5", - "STGEOMETRY.AREA": 292745184.0, - "STGEOMETRY.LEN": 86300.65, - "NAME": "District of Columbia", - "REGION": "3", - "OBJECTID": 54, - "CENTLON": "-077.0162863", - "AREALAND": 158340390, - "INTPTLON": "-077.0172290", - "MTFCC": "G4000", - "CENT": (-77.0162863, 38.9047577), - "INTPT": (-77.017229, 38.9041031), - } - ], - "Counties": [ - { - "GEOID": "11001", - "CENTLAT": "+38.9047577", - "AREAWATER": 18687196, - "BASENAME": "District of Columbia", - "STATE": "11", - "OID": 27590331264532, - "LSADC": "00", - "FUNCSTAT": "F", - "INTPTLAT": "+38.9041031", - "STGEOMETRY.AREA": 292745184.0, - "STGEOMETRY.LEN": 86300.65, - "NAME": "District of Columbia", - "OBJECTID": 632, - "CENTLON": "-077.0162863", - "COUNTYCC": "H6", - "COUNTYNS": "01702382", - "AREALAND": 158340390, - "INTPTLON": "-077.0172290", - "MTFCC": "G4020", - "COUNTY": "001", - "CENT": (-77.0162863, 38.9047577), - "INTPT": (-77.017229, 38.9041031), - } - ], - "Census Tracts": [ - { - "GEOID": "11001006202", - "CENTLAT": "+38.8801546", - "AREAWATER": 4970897, - "BASENAME": "62.02", - "STATE": "11", - "OID": 20790331304119, - "LSADC": "CT", - "FUNCSTAT": "S", - "INTPTLAT": "+38.8809933", - "STGEOMETRY.AREA": 19021758.0, - "STGEOMETRY.LEN": 34175.344, - "NAME": "Census Tract 62.02", - "OBJECTID": 47245, - "TRACT": "006202", - "CENTLON": "-077.0352173", - "AREALAND": 6539770, - "INTPTLON": "-077.0363219", - "MTFCC": "G5020", - "COUNTY": "001", - "CENT": (-77.0352173, 38.8801546), - "INTPT": (-77.0363219, 38.8809933), - } - ], -} + '2010 Census Blocks': [{ + 'SUFFIX': '', + 'GEOID': '110010062021092', + 'CENTLAT': '+38.8888686', + 'BLOCK': '1092', + 'AREAWATER': 30352, + 'STATE': '11', + 'BASENAME': '1092', + 'OID': 210403964787858, + 'LSADC': 'BK', + 'FUNCSTAT': 'S', + 'INTPTLAT': '+38.8886931', + 'STGEOMETRY.AREA': 221344.39, + 'STGEOMETRY.LEN': 2851.458, + 'NAME': 'Block 1092', + 'OBJECTID': 1695655, + 'TRACT': '006202', + 'CENTLON': '-077.0444327', + 'BLKGRP': '1', + 'AREALAND': 103558, + 'INTPTLON': '-077.0452079', + 'MTFCC': 'G5040', + 'LWBLKTYP': 'B', + 'COUNTY': '001', + 'CENT': (-77.0444327, 38.8888686), + 'INTPT': (-77.0452079, 38.8886931) + }], + 'States': [{ + 'STATENS': '01702382', + 'GEOID': '11', + 'CENTLAT': '+38.9047577', + 'AREAWATER': 18687196, + 'BASENAME': 'District of Columbia', + 'STATE': '11', + 'STUSAB': 'DC', + 'OID': 27490331294090, + 'LSADC': '00', + 'FUNCSTAT': 'A', + 'INTPTLAT': '+38.9041031', + 'DIVISION': '5', + 'STGEOMETRY.AREA': 292745184.0, + 
'STGEOMETRY.LEN': 86300.65, + 'NAME': 'District of Columbia', + 'REGION': '3', + 'OBJECTID': 54, + 'CENTLON': '-077.0162863', + 'AREALAND': 158340390, + 'INTPTLON': '-077.0172290', + 'MTFCC': 'G4000', + 'CENT': (-77.0162863, 38.9047577), + 'INTPT': (-77.017229, 38.9041031) + }], + 'Counties': [{ + 'GEOID': '11001', + 'CENTLAT': '+38.9047577', + 'AREAWATER': 18687196, + 'BASENAME': 'District of Columbia', + 'STATE': '11', + 'OID': 27590331264532, + 'LSADC': '00', + 'FUNCSTAT': 'F', + 'INTPTLAT': '+38.9041031', + 'STGEOMETRY.AREA': 292745184.0, + 'STGEOMETRY.LEN': 86300.65, + 'NAME': 'District of Columbia', + 'OBJECTID': 632, + 'CENTLON': '-077.0162863', + 'COUNTYCC': 'H6', + 'COUNTYNS': '01702382', + 'AREALAND': 158340390, + 'INTPTLON': '-077.0172290', + 'MTFCC': 'G4020', + 'COUNTY': '001', + 'CENT': (-77.0162863, 38.9047577), + 'INTPT': (-77.017229, 38.9041031) + }], + 'Census Tracts': [{ + 'GEOID': '11001006202', + 'CENTLAT': '+38.8801546', + 'AREAWATER': 4970897, + 'BASENAME': '62.02', + 'STATE': '11', + 'OID': 20790331304119, + 'LSADC': 'CT', + 'FUNCSTAT': 'S', + 'INTPTLAT': '+38.8809933', + 'STGEOMETRY.AREA': 19021758.0, + 'STGEOMETRY.LEN': 34175.344, + 'NAME': 'Census Tract 62.02', + 'OBJECTID': 47245, + 'TRACT': '006202', + 'CENTLON': '-077.0352173', + 'AREALAND': 6539770, + 'INTPTLON': '-077.0363219', + 'MTFCC': 'G5020', + 'COUNTY': '001', + 'CENT': (-77.0352173, 38.8801546), + 'INTPT': (-77.0363219, 38.8809933) + }] + } batch_resp = [ - OrderedDict( - [ - ("id", "1"), - ("address", "908 N Washtenaw, Chicago, IL, 60622"), - ("match", True), - ("matchtype", "Non_Exact"), - ("parsed", "908 N WASHTENAW AVE, CHICAGO, IL, 60622"), - ("tigerlineid", "605058427"), - ("side", "L"), - ("statefp", "17"), - ("countyfp", "031"), - ("tract", "242600"), - ("block", "4008"), - ("lon", -87.6943), - ("lat", 41.897907), - ] - ), - OrderedDict( - [ - ("id", "2"), - ("address", "1405 Wilshire Blvd, Austin, TX, 78722"), - ("match", True), - ("matchtype", "Exact"), - ("parsed", "1405 WILSHIRE BLVD, AUSTIN, TX, 78722"), - ("tigerlineid", "63947400"), - ("side", "R"), - ("statefp", "48"), - ("countyfp", "453"), - ("tract", "000307"), - ("block", "1033"), - ("lon", -97.71405), - ("lat", 30.296574), - ] - ), - OrderedDict( - [ - ("id", "3"), - ("address", "908 N Washtenaw, Chicago, IL, 60622"), - ("match", True), - ("matchtype", "Non_Exact"), - ("parsed", "908 N WASHTENAW AVE, CHICAGO, IL, 60622"), - ("tigerlineid", "605058427"), - ("side", "L"), - ("statefp", "17"), - ("countyfp", "031"), - ("tract", "242600"), - ("block", "4008"), - ("lon", -87.6943), - ("lat", 41.897907), - ] - ), - OrderedDict( - [ - ("id", "4"), - ("address", "1405 Wilshire Blvd, Austin, TX, 78722"), - ("match", True), - ("matchtype", "Exact"), - ("parsed", "1405 WILSHIRE BLVD, AUSTIN, TX, 78722"), - ("tigerlineid", "63947400"), - ("side", "R"), - ("statefp", "48"), - ("countyfp", "453"), - ("tract", "000307"), - ("block", "1033"), - ("lon", -97.71405), - ("lat", 30.296574), - ] - ), - OrderedDict( - [ - ("id", "5"), - ("address", "908 N Washtenaw, Chicago, IL, 60622"), - ("match", True), - ("matchtype", "Non_Exact"), - ("parsed", "908 N WASHTENAW AVE, CHICAGO, IL, 60622"), - ("tigerlineid", "605058427"), - ("side", "L"), - ("statefp", "17"), - ("countyfp", "031"), - ("tract", "242600"), - ("block", "4008"), - ("lon", -87.6943), - ("lat", 41.897907), - ] - ), -] + OrderedDict([ + ('id', '1'), ('address', '908 N Washtenaw, Chicago, IL, 60622'), ('match', True), + ('matchtype', 'Non_Exact'), ('parsed', '908 N WASHTENAW AVE, CHICAGO, IL, 
60622'), + ('tigerlineid', '605058427'), ('side', 'L'), ('statefp', '17'), ('countyfp', '031'), + ('tract', '242600'), ('block', '4008'), ('lon', -87.6943), ('lat', 41.897907)]), + OrderedDict([ + ('id', '2'), ('address', '1405 Wilshire Blvd, Austin, TX, 78722'), ('match', True), + ('matchtype', 'Exact'), ('parsed', '1405 WILSHIRE BLVD, AUSTIN, TX, 78722'), + ('tigerlineid', '63947400'), ('side', 'R'), ('statefp', '48'), ('countyfp', '453'), + ('tract', '000307'), ('block', '1033'), ('lon', -97.71405), ('lat', 30.296574)]), + OrderedDict([ + ('id', '3'), ('address', '908 N Washtenaw, Chicago, IL, 60622'), ('match', True), + ('matchtype', 'Non_Exact'), ('parsed', '908 N WASHTENAW AVE, CHICAGO, IL, 60622'), + ('tigerlineid', '605058427'), ('side', 'L'), ('statefp', '17'), ('countyfp', '031'), + ('tract', '242600'), ('block', '4008'), ('lon', -87.6943), ('lat', 41.897907)]), + OrderedDict([ + ('id', '4'), ('address', '1405 Wilshire Blvd, Austin, TX, 78722'), ('match', True), + ('matchtype', 'Exact'), ('parsed', '1405 WILSHIRE BLVD, AUSTIN, TX, 78722'), + ('tigerlineid', '63947400'), ('side', 'R'), ('statefp', '48'), ('countyfp', '453'), + ('tract', '000307'), ('block', '1033'), ('lon', -97.71405), ('lat', 30.296574)]), + OrderedDict([ + ('id', '5'), ('address', '908 N Washtenaw, Chicago, IL, 60622'), ('match', True), + ('matchtype', 'Non_Exact'), ('parsed', '908 N WASHTENAW AVE, CHICAGO, IL, 60622'), + ('tigerlineid', '605058427'), ('side', 'L'), ('statefp', '17'), ('countyfp', '031'), + ('tract', '242600'), ('block', '4008'), ('lon', -87.6943), ('lat', 41.897907)])] diff --git a/test/test_github/test_github.py b/test/test_github/test_github.py index fd0da7b991..213ebee895 100644 --- a/test/test_github/test_github.py +++ b/test/test_github/test_github.py @@ -3,17 +3,18 @@ from unittest.mock import patch import requests_mock -from parsons import Table, GitHub from github.GithubException import UnknownObjectException -from parsons.github.github import ParsonsGitHubError +from parsons.etl.table import Table +from parsons.github.github import GitHub, ParsonsGitHubError _dir = os.path.dirname(__file__) class TestGitHub(unittest.TestCase): + def setUp(self): - self.github = GitHub(access_token="token") + self.github = GitHub(access_token='token') @requests_mock.Mocker() def test_wrap_github_404(self, m): @@ -24,43 +25,36 @@ def test_wrap_github_404(self, m): @requests_mock.Mocker() def test_get_repo(self, m): - with open(os.path.join(_dir, "test_data", "test_get_repo.json"), "r") as f: + with open(os.path.join(_dir, 'test_data', 'test_get_repo.json'), 'r') as f: m.get(requests_mock.ANY, text=f.read()) - repo = self.github.get_repo("octocat/Hello-World") - self.assertEqual(repo["id"], 1296269) - self.assertEqual(repo["name"], "Hello-World") + repo = self.github.get_repo('octocat/Hello-World') + self.assertEqual(repo['id'], 1296269) + self.assertEqual(repo['name'], 'Hello-World') @requests_mock.Mocker() def test_list_repo_issues(self, m): - with open(os.path.join(_dir, "test_data", "test_get_repo.json"), "r") as f: - m.get("https://api.github.com:443/repos/octocat/Hello-World", text=f.read()) - with open( - os.path.join(_dir, "test_data", "test_list_repo_issues.json"), "r" - ) as f: - m.get( - "https://api.github.com:443/repos/octocat/Hello-World/issues", - text=f.read(), - ) - issues_table = self.github.list_repo_issues("octocat/Hello-World") + with open(os.path.join(_dir, 'test_data', 'test_get_repo.json'), 'r') as f: + m.get('https://api.github.com:443/repos/octocat/Hello-World', text=f.read()) + 
with open(os.path.join(_dir, 'test_data', 'test_list_repo_issues.json'), 'r') as f: + m.get('https://api.github.com:443/repos/octocat/Hello-World/issues', text=f.read()) + issues_table = self.github.list_repo_issues('octocat/Hello-World') self.assertIsInstance(issues_table, Table) self.assertEqual(len(issues_table.table), 2) - self.assertEqual(issues_table[0]["id"], 1) - self.assertEqual(issues_table[0]["title"], "Found a bug") + self.assertEqual(issues_table[0]['id'], 1) + self.assertEqual(issues_table[0]['title'], 'Found a bug') @requests_mock.Mocker() def test_download_file(self, m): - with open(os.path.join(_dir, "test_data", "test_get_repo.json"), "r") as f: - m.get("https://api.github.com:443/repos/octocat/Hello-World", text=f.read()) - with open(os.path.join(_dir, "test_data", "test_download_file.csv"), "r") as f: + with open(os.path.join(_dir, 'test_data', 'test_get_repo.json'), 'r') as f: + m.get('https://api.github.com:443/repos/octocat/Hello-World', text=f.read()) + with open(os.path.join(_dir, 'test_data', 'test_download_file.csv'), 'r') as f: m.get( - "https://raw.githubusercontent.com/octocat/Hello-World/testing/data.csv", - text=f.read(), + 'https://raw.githubusercontent.com/octocat/Hello-World/testing/data.csv', + text=f.read() ) - file_path = self.github.download_file( - "octocat/Hello-World", "data.csv", branch="testing" - ) - with open(file_path, "r") as f: + file_path = self.github.download_file('octocat/Hello-World', 'data.csv', branch='testing') + with open(file_path, 'r') as f: file_contents = f.read() self.assertEqual(file_contents, "header\ndata\n") diff --git a/test/test_gmail/test_gmail.py b/test/test_gmail/test_gmail.py index 5ac6902d4c..f7480f3e53 100644 --- a/test/test_gmail/test_gmail.py +++ b/test/test_gmail/test_gmail.py @@ -1,4 +1,4 @@ -from parsons import Gmail +from parsons.notifications.gmail import Gmail import json import os import requests_mock @@ -12,6 +12,7 @@ class TestGmail(unittest.TestCase): + @requests_mock.Mocker() def setUp(self, m): self.tmp_folder = "tmp/" @@ -20,54 +21,43 @@ def setUp(self, m): os.mkdir(self.tmp_folder) - with open(self.credentials_file, "w") as f: - f.write( - json.dumps( - { - "installed": { - "client_id": "someclientid.apps.googleusercontent.com", - "project_id": "some-project-id-12345", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://www.googleapis.com/oauth2/v3/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", # noqa: E501 - "client_secret": "someclientsecret", - "redirect_uris": [ - "urn:ietf:wg:oauth:2.0:oob", - "http://localhost", - ], - } - } - ) - ) - - with open(self.token_file, "w") as f: - f.write( - json.dumps( - { - "access_token": "someaccesstoken", - "client_id": "some-client-id.apps.googleusercontent.com", - "client_secret": "someclientsecret", - "refresh_token": "1/refreshrate", - "token_expiry": "2030-02-20T23:28:09Z", - "token_uri": "https://www.googleapis.com/oauth2/v3/token", - "user_agent": None, - "revoke_uri": "https://oauth2.googleapis.com/revoke", - "id_token": None, - "id_token_jwt": None, - "token_response": { - "access_token": "someaccesstoken", - "expires_in": 3600000, - "scope": "https://www.googleapis.com/auth/gmail.send", - "token_type": "Bearer", - }, - "scopes": ["https://www.googleapis.com/auth/gmail.send"], - "token_info_uri": "https://oauth2.googleapis.com/tokeninfo", - "invalid": False, - "_class": "OAuth2Credentials", - "_module": "oauth2client.client", - } - ) - ) + with open(self.credentials_file, 'w') as f: 
+ f.write(json.dumps({ + "installed": { + "client_id": "someclientid.apps.googleusercontent.com", + "project_id": "some-project-id-12345", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://www.googleapis.com/oauth2/v3/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_secret": "someclientsecret", + "redirect_uris": ["urn:ietf:wg:oauth:2.0:oob", "http://localhost"] + } + })) + + with open(self.token_file, 'w') as f: + f.write(json.dumps({ + "access_token": "someaccesstoken", + "client_id": "some-client-id.apps.googleusercontent.com", + "client_secret": "someclientsecret", + "refresh_token": "1/refreshrate", + "token_expiry": "2030-02-20T23:28:09Z", + "token_uri": "https://www.googleapis.com/oauth2/v3/token", + "user_agent": None, + "revoke_uri": "https://oauth2.googleapis.com/revoke", + "id_token": None, + "id_token_jwt": None, + "token_response": { + "access_token": "someaccesstoken", + "expires_in": 3600000, + "scope": "https://www.googleapis.com/auth/gmail.send", + "token_type": "Bearer" + }, + "scopes": ["https://www.googleapis.com/auth/gmail.send"], + "token_info_uri": "https://oauth2.googleapis.com/tokeninfo", + "invalid": False, + "_class": "OAuth2Credentials", + "_module": "oauth2client.client" + })) self.gmail = Gmail(self.credentials_file, self.token_file) @@ -81,21 +71,20 @@ def test_create_message_simple(self): subject = "This is a test email" message_text = "The is the message text of the email" - msg = self.gmail._create_message_simple(sender, to, subject, message_text) + msg = self.gmail._create_message_simple( + sender, to, subject, message_text) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", 'text/plain; charset="us-ascii"'), - ("MIME-Version", "1.0"), - ("Content-Transfer-Encoding", "7bit"), - ("to", to), - ("from", sender), - ("subject", subject), - ] + ('Content-Type', 'text/plain; charset="us-ascii"'), + ('MIME-Version', '1.0'), + ('Content-Transfer-Encoding', '7bit'), + ('to', to), + ('from', sender), + ('subject', subject)] # Check the metadata self.assertListEqual(decoded.items(), expected_items) @@ -115,32 +104,26 @@ def test_create_message_html(self): message_html = "

<html><body><p>This is the html message part of the email</p></body></html>
" msg = self.gmail._create_message_html( - sender, to, subject, message_text, message_html - ) + sender, to, subject, message_text, message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("subject", subject), - ("from", sender), - ("to", to), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('subject', subject), + ('from', sender), + ('to', to)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -164,31 +147,27 @@ def test_create_message_html_no_text(self): subject = "This is a test html email" message_html = "

<html><body><p>This is the html message part of the email</p></body></html>
" - msg = self.gmail._create_message_html(sender, to, subject, "", message_html) + msg = self.gmail._create_message_html( + sender, to, subject, '', message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("subject", subject), - ("from", sender), - ("to", to), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('subject', subject), + ('from', sender), + ('to', to)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -210,38 +189,32 @@ def test_create_message_attachments(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<html><body><p>This is the html message part of the email " "with attachments</p></body></html>
" - ) - attachments = [f"{_dir}/assets/loremipsum.txt"] + message_html = ("

<html><body><p>This is the html message part of the email " + "with attachments</p></body></html>
") + attachments = [f'{_dir}/assets/loremipsum.txt'] msg = self.gmail._create_message_attachments( - sender, to, subject, message_text, attachments, message_html=message_html - ) + sender, to, subject, message_text, attachments, + message_html=message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("to", to), - ("from", sender), - ("subject", subject), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('to', to), + ('from', sender), + ('subject', subject)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -255,16 +228,16 @@ def test_create_message_attachments(self): self.assertEqual(parts[0].get_payload(), message_text) self.assertEqual(parts[1].get_payload(), message_html) - if os.linesep == "\r\n": - file = f"{_dir}/assets/loremipsum_b64_win_txt.txt" + if os.linesep == '\r\n': + file = f'{_dir}/assets/loremipsum_b64_win_txt.txt' else: - file = f"{_dir}/assets/loremipsum_b64_txt.txt" + file = f'{_dir}/assets/loremipsum_b64_txt.txt' - with open(file, "r") as f: + with open(file, 'r') as f: b64_txt = f.read() self.assertEqual(parts[2].get_payload(), b64_txt) - self.assertEqual(parts[2].get_content_type(), "text/plain") + self.assertEqual(parts[2].get_content_type(), 'text/plain') # Check the number of parts expected_parts = 4 @@ -275,38 +248,32 @@ def test_create_message_attachments_jpeg(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<html><body><p>This is the html message part of the email " "with attachments</p></body></html>
" - ) - attachments = [f"{_dir}/assets/loremipsum.jpeg"] + message_html = ("

<html><body><p>This is the html message part of the email " + "with attachments</p></body></html>
") + attachments = [f'{_dir}/assets/loremipsum.jpeg'] msg = self.gmail._create_message_attachments( - sender, to, subject, message_text, attachments, message_html=message_html - ) + sender, to, subject, message_text, attachments, + message_html=message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("to", to), - ("from", sender), - ("subject", subject), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('to', to), + ('from', sender), + ('subject', subject)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -320,13 +287,13 @@ def test_create_message_attachments_jpeg(self): self.assertEqual(parts[0].get_payload(), message_text) self.assertEqual(parts[1].get_payload(), message_html) - with open(f"{_dir}/assets/loremipsum_b64_jpeg.txt", "r") as f: + with open(f'{_dir}/assets/loremipsum_b64_jpeg.txt', 'r') as f: b64_txt = f.read() self.assertEqual(parts[2].get_payload(), b64_txt) expected_id = f"<{attachments[0].split('/')[-1]}>" - self.assertEqual(parts[2].get("Content-ID"), expected_id) - self.assertEqual(parts[2].get_content_type(), "image/jpeg") + self.assertEqual(parts[2].get('Content-ID'), expected_id) + self.assertEqual(parts[2].get_content_type(), 'image/jpeg') # Check the number of parts expected_parts = 4 @@ -337,38 +304,32 @@ def test_create_message_attachments_m4a(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<html><body><p>This is the html message part of the email " "with attachments</p></body></html>
" - ) - attachments = [f"{_dir}/assets/loremipsum.m4a"] + message_html = ("

<html><body><p>This is the html message part of the email " + "with attachments</p></body></html>
") + attachments = [f'{_dir}/assets/loremipsum.m4a'] msg = self.gmail._create_message_attachments( - sender, to, subject, message_text, attachments, message_html=message_html - ) + sender, to, subject, message_text, attachments, + message_html=message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("to", to), - ("from", sender), - ("subject", subject), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('to', to), + ('from', sender), + ('subject', subject)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -382,11 +343,11 @@ def test_create_message_attachments_m4a(self): self.assertEqual(parts[0].get_payload(), message_text) self.assertEqual(parts[1].get_payload(), message_html) - with open(f"{_dir}/assets/loremipsum_b64_m4a.txt", "r") as f: + with open(f'{_dir}/assets/loremipsum_b64_m4a.txt', 'r') as f: b64_txt = f.read() self.assertEqual(parts[2].get_payload(), b64_txt) - self.assertEqual(parts[2].get_content_maintype(), "audio") + self.assertEqual(parts[2].get_content_maintype(), 'audio') # Check the number of parts expected_parts = 4 @@ -397,38 +358,32 @@ def test_create_message_attachments_mp3(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<html><body><p>This is the html message part of the email " "with attachments</p></body></html>
" - ) - attachments = [f"{_dir}/assets/loremipsum.mp3"] + message_html = ("

<html><body><p>This is the html message part of the email " + "with attachments</p></body></html>
") + attachments = [f'{_dir}/assets/loremipsum.mp3'] msg = self.gmail._create_message_attachments( - sender, to, subject, message_text, attachments, message_html=message_html - ) + sender, to, subject, message_text, attachments, + message_html=message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("to", to), - ("from", sender), - ("subject", subject), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('to', to), + ('from', sender), + ('subject', subject)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -442,11 +397,11 @@ def test_create_message_attachments_mp3(self): self.assertEqual(parts[0].get_payload(), message_text) self.assertEqual(parts[1].get_payload(), message_html) - with open(f"{_dir}/assets/loremipsum_b64_mp3.txt", "r") as f: + with open(f'{_dir}/assets/loremipsum_b64_mp3.txt', 'r') as f: b64_txt = f.read() self.assertEqual(parts[2].get_payload(), b64_txt) - self.assertEqual(parts[2].get_content_type(), "audio/mpeg") + self.assertEqual(parts[2].get_content_type(), 'audio/mpeg') # Check the number of parts expected_parts = 4 @@ -457,38 +412,32 @@ def test_create_message_attachments_mp4(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<html><body><p>This is the html message part of the email " "with attachments</p></body></html>
" - ) - attachments = [f"{_dir}/assets/loremipsum.mp4"] + message_html = ("

<html><body><p>This is the html message part of the email " + "with attachments</p></body></html>
") + attachments = [f'{_dir}/assets/loremipsum.mp4'] msg = self.gmail._create_message_attachments( - sender, to, subject, message_text, attachments, message_html=message_html - ) + sender, to, subject, message_text, attachments, + message_html=message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("to", to), - ("from", sender), - ("subject", subject), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('to', to), + ('from', sender), + ('subject', subject)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -502,11 +451,11 @@ def test_create_message_attachments_mp4(self): self.assertEqual(parts[0].get_payload(), message_text) self.assertEqual(parts[1].get_payload(), message_html) - with open(f"{_dir}/assets/loremipsum_b64_mp4.txt", "r") as f: + with open(f'{_dir}/assets/loremipsum_b64_mp4.txt', 'r') as f: b64_txt = f.read() self.assertEqual(parts[2].get_payload(), b64_txt) - self.assertEqual(parts[2].get_content_type(), "video/mp4") + self.assertEqual(parts[2].get_content_type(), 'video/mp4') # Check the number of parts expected_parts = 4 @@ -517,39 +466,33 @@ def test_create_message_attachments_pdf(self): to = "Recepient " subject = "This is a test email with attachements" message_text = "The is the message text of the email with attachments" - message_html = ( - "

<html><body><p>This is the html message part of the email " "with attachments</p></body></html>
" - ) - attachments = [f"{_dir}/assets/loremipsum.pdf"] + message_html = ("

<html><body><p>This is the html message part of the email " + "with attachments</p></body></html>
") + attachments = [f'{_dir}/assets/loremipsum.pdf'] msg = self.gmail._create_message_attachments( - sender, to, subject, message_text, attachments, message_html=message_html - ) + sender, to, subject, message_text, attachments, + message_html=message_html) raw = self.gmail._encode_raw_message(msg) decoded = email.message_from_bytes( - base64.urlsafe_b64decode(bytes(raw["raw"], "utf-8")) - ) + base64.urlsafe_b64decode(bytes(raw['raw'], 'utf-8'))) expected_items = [ - ("Content-Type", "multipart/alternative;\n boundary="), - ("MIME-Version", "1.0"), - ("to", to), - ("from", sender), - ("subject", subject), - ] + ('Content-Type', 'multipart/alternative;\n boundary='), + ('MIME-Version', '1.0'), + ('to', to), + ('from', sender), + ('subject', subject)] # The boundary id changes everytime. Replace it with the beginnig to # avoid failures updated_items = [] for i in decoded.items(): - if ( - "Content-Type" in i[0] and "multipart/alternative;\n boundary=" in i[1] - ): # noqa + if 'Content-Type' in i[0] and 'multipart/alternative;\n boundary=' in i[1]: # noqa updated_items.append( - ("Content-Type", "multipart/alternative;\n boundary=") - ) + ('Content-Type', 'multipart/alternative;\n boundary=')) else: updated_items.append((i[0], i[1])) @@ -563,11 +506,11 @@ def test_create_message_attachments_pdf(self): self.assertEqual(parts[0].get_payload(), message_text) self.assertEqual(parts[1].get_payload(), message_html) - with open(f"{_dir}/assets/loremipsum_b64_pdf.txt", "r") as f: + with open(f'{_dir}/assets/loremipsum_b64_pdf.txt', 'r') as f: b64_txt = f.read() self.assertEqual(parts[2].get_payload(), b64_txt) - self.assertEqual(parts[2].get_content_type(), "application/pdf") + self.assertEqual(parts[2].get_content_type(), 'application/pdf') # Check the number of parts expected_parts = 4 @@ -581,16 +524,15 @@ def test__validate_email_string(self): {"email": "Sender sender@email.com", "expected": False}, {"email": "Sender ", "expected": False}, {"email": "Sender ", "expected": True}, - {"email": "Sender ", "expected": True}, + {"email": "Sender ", "expected": True} ] for e in emails: - if e["expected"]: - self.assertTrue(self.gmail._validate_email_string(e["email"])) + if e['expected']: + self.assertTrue(self.gmail._validate_email_string(e['email'])) else: self.assertRaises( - ValueError, self.gmail._validate_email_string, e["email"] - ) + ValueError, self.gmail._validate_email_string, e['email']) # TODO test sending emails diff --git a/test/test_google/googlecivic_responses.py b/test/test_google/googlecivic_responses.py index 3b40b82f2a..8196136bf4 100644 --- a/test/test_google/googlecivic_responses.py +++ b/test/test_google/googlecivic_responses.py @@ -1,920 +1,901 @@ # flake8: noqa elections_resp = { - "kind": "civicinfo#electionsQueryResponse", - "elections": [ - { - "id": "2000", - "name": "VIP Test Election", - "electionDay": "2021-06-06", - "ocdDivisionId": "ocd-division/country:us", - }, - { - "id": "4803", - "name": "Los Angeles County Election", - "electionDay": "2019-05-14", - "ocdDivisionId": "ocd-division/country:us/state:ca/county:los_angeles", - }, - { - "id": "4804", - "name": "Oklahoma Special Election", - "electionDay": "2019-05-14", - "ocdDivisionId": "ocd-division/country:us/state:ok", - }, - { - "id": "4810", - "name": "Oregon County Special Elections", - "electionDay": "2019-05-21", - "ocdDivisionId": "ocd-division/country:us/state:or", - }, - { - "id": "4811", - "name": "Los Angeles County Special Election", - "electionDay": "2019-06-04", - "ocdDivisionId": 
"ocd-division/country:us/state:ca/county:los_angeles", - }, - { - "id": "4823", - "name": "9th Congressional District Primary Election", - "electionDay": "2019-05-14", - "ocdDivisionId": "ocd-division/country:us/state:nc/cd:9", - }, - ], + 'kind': 'civicinfo#electionsQueryResponse', + 'elections': [{ + 'id': '2000', + 'name': 'VIP Test Election', + 'electionDay': '2021-06-06', + 'ocdDivisionId': 'ocd-division/country:us' + }, { + 'id': '4803', + 'name': 'Los Angeles County Election', + 'electionDay': '2019-05-14', + 'ocdDivisionId': 'ocd-division/country:us/state:ca/county:los_angeles' + }, { + 'id': '4804', + 'name': 'Oklahoma Special Election', + 'electionDay': '2019-05-14', + 'ocdDivisionId': 'ocd-division/country:us/state:ok' + }, { + 'id': '4810', + 'name': 'Oregon County Special Elections', + 'electionDay': '2019-05-21', + 'ocdDivisionId': 'ocd-division/country:us/state:or' + }, { + 'id': '4811', + 'name': 'Los Angeles County Special Election', + 'electionDay': '2019-06-04', + 'ocdDivisionId': 'ocd-division/country:us/state:ca/county:los_angeles' + }, { + 'id': '4823', + 'name': '9th Congressional District Primary Election', + 'electionDay': '2019-05-14', + 'ocdDivisionId': 'ocd-division/country:us/state:nc/cd:9' + }] } voterinfo_resp = { - "kind": "civicinfo#voterInfoResponse", - "election": { - "id": "2000", - "name": "VIP Test Election", - "electionDay": "2021-06-06", - "ocdDivisionId": "ocd-division/country:us", + 'kind': 'civicinfo#voterInfoResponse', + 'election': { + 'id': '2000', + 'name': 'VIP Test Election', + 'electionDay': '2021-06-06', + 'ocdDivisionId': 'ocd-division/country:us' }, - "normalizedInput": { - "line1": "900 North Washtenaw Avenue", - "city": "Chicago", - "state": "IL", - "zip": "60622", + 'normalizedInput': { + 'line1': '900 North Washtenaw Avenue', + 'city': 'Chicago', + 'state': 'IL', + 'zip': '60622' }, - "pollingLocations": [ - { - "address": { - "locationName": "UKRAINIAN ORTHDX PATRONAGE CH", - "line1": "904 N WASHTENAW AVE", - "city": "CHICAGO", - "state": "IL", - "zip": "60622", - }, - "notes": "", - "pollingHours": "", - "sources": [{"name": "Voting Information Project", "official": True}], - } - ], - "contests": [ - { - "type": "General", - "office": "United States Senator", - "level": ["country"], - "roles": ["legislatorUpperBody"], - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "candidates": [ - { - "name": 'James D. "Jim" Oberweis', - "party": "Republican", - "candidateUrl": "http://jimoberweis.com", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/Oberweis2014", - }, - {"type": "Twitter", "id": "https://twitter.com/Oberweis2014"}, - { - "type": "YouTube", - "id": "https://www.youtube.com/channel/UCOVqW3lh9q9cnk-R2NedLTw", - }, - ], - }, - { - "name": "Richard J. 
Durbin", - "party": "Democratic", - "candidateUrl": "http://www.dickdurbin.com/home", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/dickdurbin", - }, - {"type": "Twitter", "id": "https://twitter.com/DickDurbin"}, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/SenatorDickDurbin", - }, - ], - }, - { - "name": "Sharon Hansen", - "party": "Libertarian", - "candidateUrl": "http://www.sharonhansenforussenate.org/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/USSenate2014", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/nairotci", - }, - ], - }, - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "US House of Representatives - District 7", - "level": ["country"], - "roles": ["legislatorLowerBody"], - "district": { - "name": "Illinois's 7th congressional district", - "scope": "congressional", - "id": "ocd-division/country:us/state:il/cd:7", - }, - "candidates": [ - { - "name": "Danny K. Davis", - "party": "Democratic", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/dkdforcongress", - } - ], - }, - {"name": "Robert L. Bumpers", "party": "Republican"}, - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Governor/ Lieutenant Governor", - "level": ["administrativeArea1"], - "roles": ["headOfGovernment"], - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "candidates": [ - { - "name": "Bruce Rauner/ Evelyn Sanguinetti", - "party": "Republican", - "candidateUrl": "http://brucerauner.com/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/BruceRauner", - }, - {"type": "Twitter", "id": "https://twitter.com/BruceRauner"}, - { - "type": "GooglePlus", - "id": "https://plus.google.com/117459818564381220425", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/brucerauner", - }, - ], - }, - { - "name": "Chad Grimm/ Alexander Cummings", - "party": "Libertarian", - "candidateUrl": "http://www.grimmforliberty.com/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/grimmforgovernor", - }, - { - "type": "Twitter", - "id": "https://twitter.com/GrimmForLiberty", - }, - { - "type": "GooglePlus", - "id": "https://plus.google.com/118063028184706045944", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/channel/UC7RjCAp7oAGM8iykNl5aCsQ", - }, - ], - }, - { - "name": "Pat Quinn/ Paul Vallas", - "party": "Democratic", - "candidateUrl": "https://www.quinnforillinois.com/00/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/quinnforillinois", - }, - {"type": "Twitter", "id": "https://twitter.com/quinnforil"}, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/QuinnForIllinois", - }, - ], - }, - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Comptroller", - "level": ["administrativeArea1"], - "roles": ["governmentOfficer"], - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "candidates": [ - { - "name": "Judy Baar Topinka", - "party": "Republican", - "candidateUrl": "http://judybaartopinka.com", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/153417423039", - }, - {"type": "Twitter", "id": 
"https://twitter.com/ElectTopinka"}, - { - "type": "GooglePlus", - "id": "https://plus.google.com/118116620949235387993", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/channel/UCfbQXLS2yrY1wAJQH2oq4Kg", - }, - ], - }, - { - "name": "Julie Fox", - "party": "Libertarian", - "candidateUrl": "http://juliefox2014.com/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/154063524725251", - }, - {"type": "Twitter", "id": "https://twitter.com/JulieFox1214"}, - { - "type": "GooglePlus", - "id": "https://plus.google.com/+Juliefox2014", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/channel/UCz2A7-6e0_pJJ10bXvBvcIA", - }, - ], - }, - { - "name": "Sheila Simon", - "party": "Democratic", - "candidateUrl": "http://www.sheilasimon.org", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/SheilaSimonIL", - }, - {"type": "Twitter", "id": "https://twitter.com/SheilaSimonIL"}, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/SheilaSimonIL", - }, - ], - }, - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Secretary Of State", - "level": ["administrativeArea1"], - "roles": ["governmentOfficer"], - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "candidates": [ - { - "name": "Christopher Michel", - "party": "Libertarian", - "candidateUrl": "http://chrisforillinois.org/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/ChrisMichelforIllinois", - } - ], - }, - {"name": "Jesse White", "party": "Democratic"}, - { - "name": "Michael Webster", - "party": "Republican", - "candidateUrl": "http://websterforillinois.net/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/MikeWebsterIL", - }, - {"type": "Twitter", "id": "https://twitter.com/MikeWebsterIL"}, - { - "type": "GooglePlus", - "id": "https://plus.google.com/106530502764515758186", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/MikeWebsterIL", - }, - ], - }, - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Attorney General", - "level": ["administrativeArea1"], - "roles": ["governmentOfficer"], - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "candidates": [ - { - "name": "Ben Koyl", - "party": "Libertarian", - "candidateUrl": "http://koyl4ilattorneygeneral.com/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/Koyl4AttorneyGeneral", - } - ], - }, - { - "name": "Lisa Madigan", - "party": "Democratic", - "candidateUrl": "http://lisamadigan.org/splash", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/lisamadigan", - }, - {"type": "Twitter", "id": "https://twitter.com/LisaMadigan"}, - { - "type": "GooglePlus", - "id": "https://plus.google.com/106732728212286274178", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/LisaMadigan", - }, - ], - }, - { - "name": "Paul M. 
Schimpf", - "party": "Republican", - "candidateUrl": "http://www.schimpf4illinois.com/contact_us?splash=1", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/136912986515438", - }, - { - "type": "Twitter", - "id": "https://twitter.com/Schimpf_4_IL_AG", - }, - ], - }, - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Treasurer", - "level": ["administrativeArea1"], - "roles": ["governmentOfficer"], - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "candidates": [ - { - "name": "Matthew Skopek", - "party": "Libertarian", - "candidateUrl": "http://www.matthewskopek.com/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/TransparentandResponsibleGoverment", - } - ], - }, - { - "name": "Michael W. Frerichs", - "party": "Democratic", - "candidateUrl": "http://frerichsforillinois.com/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/mikeforillinois", - }, - { - "type": "Twitter", - "id": "https://twitter.com/mikeforillinois", - }, - { - "type": "GooglePlus", - "id": "https://plus.google.com/116963380840614292664", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/channel/UCX77L5usHWxrr0BdOv0r8Dg", - }, - ], - }, - { - "name": "Tom Cross", - "party": "Republican", - "candidateUrl": "http://jointomcross.com", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/JoinTomCross", - }, - {"type": "Twitter", "id": "https://twitter.com/JoinTomCross"}, - { - "type": "GooglePlus", - "id": "https://plus.google.com/117776663930603924689", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/channel/UCDBLEvIGHJX1kIc_eZL5qPw", - }, - ], - }, - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "State House - District 4", - "level": ["administrativeArea1"], - "roles": ["legislatorLowerBody"], - "district": { - "name": "Illinois State House district 4", - "scope": "stateLower", - "id": "ocd-division/country:us/state:il/sldl:4", - }, - "candidates": [{"name": "Cynthia Soto", "party": "Democratic"}], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook County Treasurer", - "level": ["administrativeArea2"], - "roles": ["governmentOfficer"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [{"name": "Maria Pappas", "party": "Democratic"}], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook County Clerk", - "level": ["administrativeArea2"], - "roles": ["governmentOfficer"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "David D. 
Orr", - "party": "Democratic", - "candidateUrl": "http://www.davidorr.org/", - "channels": [ - {"type": "Facebook", "id": "https://www.facebook.com/ClerkOrr"}, - { - "type": "Twitter", - "id": "https://twitter.com/cookcountyclerk", - }, - { - "type": "YouTube", - "id": "https://www.youtube.com/user/TheDavidOrr", - }, - ], - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook County Sheriff", - "level": ["administrativeArea2"], - "roles": ["governmentOfficer"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Thomas J. Dart", - "party": "Democratic", - "candidateUrl": "http://www.sherifftomdart.com/", - "channels": [ - {"type": "Twitter", "id": "https://twitter.com/TomDart"} - ], - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook County Assessor", - "level": ["administrativeArea2"], - "roles": ["governmentOfficer"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Joseph Berrios", - "party": "Democratic", - "candidateUrl": "http://www.electjoeberrios.com/", - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook County Board President", - "level": ["administrativeArea2"], - "roles": ["legislatorUpperBody"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Toni Preckwinkle", - "party": "Democratic", - "candidateUrl": "http://www.tonipreckwinkle.org/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/196166530417661", - }, - { - "type": "Twitter", - "id": "https://twitter.com/ToniPreckwinkle", - }, - ], - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Arnold Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Bridget Anne Mitchell", - "party": "Democratic", - "candidateUrl": "http://mitchellforjudge.com", - "email": "bridget@mitchellforjudge.com", - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Reyes Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [{"name": "Diana Rosario", "party": "Democratic"}], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Howse, Jr. 
Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Caroline Kate Moreland", - "party": "Democratic", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/judgemoreland", - } - ], - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Neville, Jr. Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [{"name": "William B. Raines", "party": "Democratic"}], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Egan Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Daniel J. Kubasiak", - "party": "Democratic", - "candidateUrl": "http://www.judgedank.org/", - "email": "Info@JudgeDanK.org", - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Connors Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Kristal Rivers", - "party": "Democratic", - "candidateUrl": "http://rivers4judge.org/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/193818317451678", - }, - {"type": "Twitter", "id": "https://twitter.com/Rivers4Judge"}, - ], - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - McDonald Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Cynthia Y. Cobbs", - "party": "Democratic", - "candidateUrl": "http://judgecobbs.com/", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/1387935061420024", - }, - {"type": "Twitter", "id": "https://twitter.com/judgecobbs"}, - ], - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Lowrance Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [{"name": "Thomas J. 
Carroll", "party": "Democratic"}], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Veal Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Andrea Michele Buford", - "party": "Democratic", - "channels": [ - { - "type": "Facebook", - "id": "https://www.facebook.com/ElectJudgeBufordForTheBench", - } - ], - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Burke Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [{"name": "Maritza Martinez", "party": "Democratic"}], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "General", - "office": "Cook Circuit - Felton Vacancy", - "level": ["administrativeArea2"], - "roles": ["judge"], - "district": { - "name": "Cook County", - "scope": "countywide", - "id": "ocd-division/country:us/state:il/county:cook", - }, - "candidates": [ - { - "name": "Patricia O'Brien Sheahan", - "party": "Democratic", - "candidateUrl": "http://sheahanforjudge.com/", - } - ], - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "Referendum", - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "referendumTitle": "CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (1)", - "referendumSubtitle": '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".', - "referendumUrl": "http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15966", - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "Referendum", - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "referendumTitle": "CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (2)", - "referendumSubtitle": '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) 
WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".', - "referendumUrl": "http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15967", - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "Referendum", - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "referendumTitle": "STATEWIDE ADVISORY QUESTION (1)", - "referendumUrl": "http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15738", - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "Referendum", - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "referendumTitle": "STATEWIDE ADVISORY QUESTION (2)", - "referendumUrl": "http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15739", - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - { - "type": "Referendum", - "district": { - "name": "Illinois", - "scope": "statewide", - "id": "ocd-division/country:us/state:il", - }, - "referendumTitle": "STATEWIDE ADVISORY QUESTION (3)", - "referendumUrl": "http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15740", - "sources": [{"name": "Ballot Information Project", "official": False}], - }, - ], - "state": [ - { - "name": "Illinois", - "electionAdministrationBody": { - "name": "Illinois State Board of Elections", - "electionInfoUrl": "http://www.elections.il.gov", - "votingLocationFinderUrl": "https://ova.elections.il.gov/PollingPlaceLookup.aspx", - "ballotInfoUrl": "https://www.elections.il.gov/ElectionInformation/OfficesUpForElection.aspx?ID=2GLMQa4Rilk%3d", - "correspondenceAddress": { - "line1": "2329 S Macarthur Blvd.", - "city": "Springfield", - "state": "Illinois", - "zip": "62704-4503", - }, - }, - "local_jurisdiction": { - "name": "CITY OF CHICAGO", - "sources": [{"name": "Voting Information Project", "official": True}], - }, - "sources": [{"name": "", "official": False}], - } - ], + 'pollingLocations': [{ + 'address': { + 'locationName': 'UKRAINIAN ORTHDX PATRONAGE CH', + 'line1': '904 N WASHTENAW AVE', + 'city': 'CHICAGO', + 'state': 'IL', + 'zip': '60622' + }, + 'notes': '', + 'pollingHours': '', + 'sources': [{ + 'name': 'Voting Information Project', + 'official': True + }] + }], + 'contests': [{ + 'type': 'General', + 'office': 'United States Senator', + 'level': ['country'], + 'roles': ['legislatorUpperBody'], + 'district': { + 'name': 'Illinois', + 'scope': 'statewide', + 'id': 'ocd-division/country:us/state:il' + }, + 'candidates': [{ + 'name': 'James D. "Jim" Oberweis', + 'party': 'Republican', + 'candidateUrl': 'http://jimoberweis.com', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/Oberweis2014' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/Oberweis2014' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/channel/UCOVqW3lh9q9cnk-R2NedLTw' + }] + }, { + 'name': 'Richard J. 
Durbin', + 'party': 'Democratic', + 'candidateUrl': 'http://www.dickdurbin.com/home', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/dickdurbin' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/DickDurbin' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/user/SenatorDickDurbin' + }] + }, { + 'name': 'Sharon Hansen', + 'party': 'Libertarian', + 'candidateUrl': 'http://www.sharonhansenforussenate.org/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/USSenate2014' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/user/nairotci' + }] + }], + 'sources': [{ + 'name': 'Ballot Information Project', + 'official': False + }] + }, { + 'type': 'General', + 'office': 'US House of Representatives - District 7', + 'level': ['country'], + 'roles': ['legislatorLowerBody'], + 'district': { + 'name': "Illinois's 7th congressional district", + 'scope': 'congressional', + 'id': 'ocd-division/country:us/state:il/cd:7' + }, + 'candidates': [{ + 'name': 'Danny K. Davis', + 'party': 'Democratic', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/dkdforcongress' + }] + }, { + 'name': 'Robert L. Bumpers', + 'party': 'Republican' + }], + 'sources': [{ + 'name': 'Ballot Information Project', + 'official': False + }] + }, { + 'type': 'General', + 'office': 'Governor/ Lieutenant Governor', + 'level': ['administrativeArea1'], + 'roles': ['headOfGovernment'], + 'district': { + 'name': 'Illinois', + 'scope': 'statewide', + 'id': 'ocd-division/country:us/state:il' + }, + 'candidates': [{ + 'name': 'Bruce Rauner/ Evelyn Sanguinetti', + 'party': 'Republican', + 'candidateUrl': 'http://brucerauner.com/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/BruceRauner' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/BruceRauner' + }, { + 'type': 'GooglePlus', + 'id': 'https://plus.google.com/117459818564381220425' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/user/brucerauner' + }] + }, { + 'name': 'Chad Grimm/ Alexander Cummings', + 'party': 'Libertarian', + 'candidateUrl': 'http://www.grimmforliberty.com/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/grimmforgovernor' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/GrimmForLiberty' + }, { + 'type': 'GooglePlus', + 'id': 'https://plus.google.com/118063028184706045944' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/channel/UC7RjCAp7oAGM8iykNl5aCsQ' + }] + }, { + 'name': 'Pat Quinn/ Paul Vallas', + 'party': 'Democratic', + 'candidateUrl': 'https://www.quinnforillinois.com/00/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/quinnforillinois' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/quinnforil' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/user/QuinnForIllinois' + }] + }], + 'sources': [{ + 'name': 'Ballot Information Project', + 'official': False + }] + }, { + 'type': 'General', + 'office': 'Comptroller', + 'level': ['administrativeArea1'], + 'roles': ['governmentOfficer'], + 'district': { + 'name': 'Illinois', + 'scope': 'statewide', + 'id': 'ocd-division/country:us/state:il' + }, + 'candidates': [{ + 'name': 'Judy Baar Topinka', + 'party': 'Republican', + 'candidateUrl': 'http://judybaartopinka.com', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/153417423039' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/ElectTopinka' + }, { + 'type': 'GooglePlus', + 'id': 
'https://plus.google.com/118116620949235387993' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/channel/UCfbQXLS2yrY1wAJQH2oq4Kg' + }] + }, { + 'name': 'Julie Fox', + 'party': 'Libertarian', + 'candidateUrl': 'http://juliefox2014.com/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/154063524725251' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/JulieFox1214' + }, { + 'type': 'GooglePlus', + 'id': 'https://plus.google.com/+Juliefox2014' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/channel/UCz2A7-6e0_pJJ10bXvBvcIA' + }] + }, { + 'name': 'Sheila Simon', + 'party': 'Democratic', + 'candidateUrl': 'http://www.sheilasimon.org', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/SheilaSimonIL' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/SheilaSimonIL' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/user/SheilaSimonIL' + }] + }], + 'sources': [{ + 'name': 'Ballot Information Project', + 'official': False + }] + }, { + 'type': 'General', + 'office': 'Secretary Of State', + 'level': ['administrativeArea1'], + 'roles': ['governmentOfficer'], + 'district': { + 'name': 'Illinois', + 'scope': 'statewide', + 'id': 'ocd-division/country:us/state:il' + }, + 'candidates': [{ + 'name': 'Christopher Michel', + 'party': 'Libertarian', + 'candidateUrl': 'http://chrisforillinois.org/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/ChrisMichelforIllinois' + }] + }, { + 'name': 'Jesse White', + 'party': 'Democratic' + }, { + 'name': 'Michael Webster', + 'party': 'Republican', + 'candidateUrl': 'http://websterforillinois.net/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/MikeWebsterIL' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/MikeWebsterIL' + }, { + 'type': 'GooglePlus', + 'id': 'https://plus.google.com/106530502764515758186' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/user/MikeWebsterIL' + }] + }], + 'sources': [{ + 'name': 'Ballot Information Project', + 'official': False + }] + }, { + 'type': 'General', + 'office': 'Attorney General', + 'level': ['administrativeArea1'], + 'roles': ['governmentOfficer'], + 'district': { + 'name': 'Illinois', + 'scope': 'statewide', + 'id': 'ocd-division/country:us/state:il' + }, + 'candidates': [{ + 'name': 'Ben Koyl', + 'party': 'Libertarian', + 'candidateUrl': 'http://koyl4ilattorneygeneral.com/', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/Koyl4AttorneyGeneral' + }] + }, { + 'name': 'Lisa Madigan', + 'party': 'Democratic', + 'candidateUrl': 'http://lisamadigan.org/splash', + 'channels': [{ + 'type': 'Facebook', + 'id': 'https://www.facebook.com/lisamadigan' + }, { + 'type': 'Twitter', + 'id': 'https://twitter.com/LisaMadigan' + }, { + 'type': 'GooglePlus', + 'id': 'https://plus.google.com/106732728212286274178' + }, { + 'type': 'YouTube', + 'id': 'https://www.youtube.com/user/LisaMadigan' + }] + }, { + 'name': 'Paul M. 
Schimpf',
+            'party': 'Republican',
+            'candidateUrl': 'http://www.schimpf4illinois.com/contact_us?splash=1',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/136912986515438'
+            }, {
+                'type': 'Twitter',
+                'id': 'https://twitter.com/Schimpf_4_IL_AG'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Treasurer',
+        'level': ['administrativeArea1'],
+        'roles': ['governmentOfficer'],
+        'district': {
+            'name': 'Illinois',
+            'scope': 'statewide',
+            'id': 'ocd-division/country:us/state:il'
+        },
+        'candidates': [{
+            'name': 'Matthew Skopek',
+            'party': 'Libertarian',
+            'candidateUrl': 'http://www.matthewskopek.com/',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/TransparentandResponsibleGoverment'
+            }]
+        }, {
+            'name': 'Michael W. Frerichs',
+            'party': 'Democratic',
+            'candidateUrl': 'http://frerichsforillinois.com/',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/mikeforillinois'
+            }, {
+                'type': 'Twitter',
+                'id': 'https://twitter.com/mikeforillinois'
+            }, {
+                'type': 'GooglePlus',
+                'id': 'https://plus.google.com/116963380840614292664'
+            }, {
+                'type': 'YouTube',
+                'id': 'https://www.youtube.com/channel/UCX77L5usHWxrr0BdOv0r8Dg'
+            }]
+        }, {
+            'name': 'Tom Cross',
+            'party': 'Republican',
+            'candidateUrl': 'http://jointomcross.com',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/JoinTomCross'
+            }, {
+                'type': 'Twitter',
+                'id': 'https://twitter.com/JoinTomCross'
+            }, {
+                'type': 'GooglePlus',
+                'id': 'https://plus.google.com/117776663930603924689'
+            }, {
+                'type': 'YouTube',
+                'id': 'https://www.youtube.com/channel/UCDBLEvIGHJX1kIc_eZL5qPw'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'State House - District 4',
+        'level': ['administrativeArea1'],
+        'roles': ['legislatorLowerBody'],
+        'district': {
+            'name': 'Illinois State House district 4',
+            'scope': 'stateLower',
+            'id': 'ocd-division/country:us/state:il/sldl:4'
+        },
+        'candidates': [{
+            'name': 'Cynthia Soto',
+            'party': 'Democratic'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook County Treasurer',
+        'level': ['administrativeArea2'],
+        'roles': ['governmentOfficer'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Maria Pappas',
+            'party': 'Democratic'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook County Clerk',
+        'level': ['administrativeArea2'],
+        'roles': ['governmentOfficer'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'David D. Orr',
+            'party': 'Democratic',
+            'candidateUrl': 'http://www.davidorr.org/',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/ClerkOrr'
+            }, {
+                'type': 'Twitter',
+                'id': 'https://twitter.com/cookcountyclerk'
+            }, {
+                'type': 'YouTube',
+                'id': 'https://www.youtube.com/user/TheDavidOrr'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook County Sheriff',
+        'level': ['administrativeArea2'],
+        'roles': ['governmentOfficer'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Thomas J. Dart',
+            'party': 'Democratic',
+            'candidateUrl': 'http://www.sherifftomdart.com/',
+            'channels': [{
+                'type': 'Twitter',
+                'id': 'https://twitter.com/TomDart'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook County Assessor',
+        'level': ['administrativeArea2'],
+        'roles': ['governmentOfficer'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Joseph Berrios',
+            'party': 'Democratic',
+            'candidateUrl': 'http://www.electjoeberrios.com/'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook County Board President',
+        'level': ['administrativeArea2'],
+        'roles': ['legislatorUpperBody'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Toni Preckwinkle',
+            'party': 'Democratic',
+            'candidateUrl': 'http://www.tonipreckwinkle.org/',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/196166530417661'
+            }, {
+                'type': 'Twitter',
+                'id': 'https://twitter.com/ToniPreckwinkle'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Arnold Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Bridget Anne Mitchell',
+            'party': 'Democratic',
+            'candidateUrl': 'http://mitchellforjudge.com',
+            'email': 'bridget@mitchellforjudge.com'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Reyes Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Diana Rosario',
+            'party': 'Democratic'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Howse, Jr. Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Caroline Kate Moreland',
+            'party': 'Democratic',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/judgemoreland'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Neville, Jr. Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'William B. Raines',
+            'party': 'Democratic'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Egan Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Daniel J. Kubasiak',
+            'party': 'Democratic',
+            'candidateUrl': 'http://www.judgedank.org/',
+            'email': 'Info@JudgeDanK.org'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Connors Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Kristal Rivers',
+            'party': 'Democratic',
+            'candidateUrl': 'http://rivers4judge.org/',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/193818317451678'
+            }, {
+                'type': 'Twitter',
+                'id': 'https://twitter.com/Rivers4Judge'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - McDonald Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Cynthia Y. Cobbs',
+            'party': 'Democratic',
+            'candidateUrl': 'http://judgecobbs.com/',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/1387935061420024'
+            }, {
+                'type': 'Twitter',
+                'id': 'https://twitter.com/judgecobbs'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Lowrance Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Thomas J. Carroll',
+            'party': 'Democratic'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Veal Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Andrea Michele Buford',
+            'party': 'Democratic',
+            'channels': [{
+                'type': 'Facebook',
+                'id': 'https://www.facebook.com/ElectJudgeBufordForTheBench'
+            }]
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Burke Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': 'Maritza Martinez',
+            'party': 'Democratic'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'General',
+        'office': 'Cook Circuit - Felton Vacancy',
+        'level': ['administrativeArea2'],
+        'roles': ['judge'],
+        'district': {
+            'name': 'Cook County',
+            'scope': 'countywide',
+            'id': 'ocd-division/country:us/state:il/county:cook'
+        },
+        'candidates': [{
+            'name': "Patricia O'Brien Sheahan",
+            'party': 'Democratic',
+            'candidateUrl': 'http://sheahanforjudge.com/'
+        }],
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'Referendum',
+        'district': {
+            'name': 'Illinois',
+            'scope': 'statewide',
+            'id': 'ocd-division/country:us/state:il'
+        },
+        'referendumTitle': 'CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (1)',
+        'referendumSubtitle': '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".',
+        'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15966',
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'Referendum',
+        'district': {
+            'name': 'Illinois',
+            'scope': 'statewide',
+            'id': 'ocd-division/country:us/state:il'
+        },
+        'referendumTitle': 'CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (2)',
+        'referendumSubtitle': '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".',
+        'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15967',
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'Referendum',
+        'district': {
+            'name': 'Illinois',
+            'scope': 'statewide',
+            'id': 'ocd-division/country:us/state:il'
+        },
+        'referendumTitle': 'STATEWIDE ADVISORY QUESTION (1)',
+        'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15738',
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'Referendum',
+        'district': {
+            'name': 'Illinois',
+            'scope': 'statewide',
+            'id': 'ocd-division/country:us/state:il'
+        },
+        'referendumTitle': 'STATEWIDE ADVISORY QUESTION (2)',
+        'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15739',
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }, {
+        'type': 'Referendum',
+        'district': {
+            'name': 'Illinois',
+            'scope': 'statewide',
+            'id': 'ocd-division/country:us/state:il'
+        },
+        'referendumTitle': 'STATEWIDE ADVISORY QUESTION (3)',
+        'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15740',
+        'sources': [{
+            'name': 'Ballot Information Project',
+            'official': False
+        }]
+    }],
+    'state': [{
+        'name': 'Illinois',
+        'electionAdministrationBody': {
+            'name': 'Illinois State Board of Elections',
+            'electionInfoUrl': 'http://www.elections.il.gov',
+            'votingLocationFinderUrl': 'https://ova.elections.il.gov/PollingPlaceLookup.aspx',
+            'ballotInfoUrl': 'https://www.elections.il.gov/ElectionInformation/OfficesUpForElection.aspx?ID=2GLMQa4Rilk%3d',
+            'correspondenceAddress': {
+                'line1': '2329 S Macarthur Blvd.',
+                'city': 'Springfield',
+                'state': 'Illinois',
+                'zip': '62704-4503'
+            }
+        },
+        'local_jurisdiction': {
+            'name': 'CITY OF CHICAGO',
+            'sources': [{
+                'name': 'Voting Information Project',
+                'official': True
+            }]
+        },
+        'sources': [{
+            'name': '',
+            'official': False
+        }]
+    }]
 }
 
-polling_data = [
+polling_data = [{
+    'passed_address': '900 N Washtenaw, Chicago, IL 60622',
+    'polling_locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
+    'polling_address': '904 N WASHTENAW AVE',
+    'polling_city': 'CHICAGO',
+    'polling_state': 'IL',
+    'polling_zip': '60622',
+    'source_name': 'Voting Information Project',
+    'source_official': True,
+    'pollingHours': '',
+    'notes': ''}, {
-    {
-        "passed_address": "900 N Washtenaw, Chicago, IL 60622",
-        "polling_locationName": "UKRAINIAN ORTHDX PATRONAGE CH",
-        "polling_address": "904 N WASHTENAW AVE",
-        "polling_city": "CHICAGO",
-        "polling_state": "IL",
-        "polling_zip": "60622",
-        "source_name": "Voting Information Project",
-        "source_official": True,
-        "pollingHours": "",
-        "notes": "",
-    },
-    {
-        "passed_address": "900 N Washtenaw, Chicago, IL 60622",
-        "polling_locationName": "UKRAINIAN ORTHDX PATRONAGE CH",
-        "polling_address": "904 N WASHTENAW AVE",
-        "polling_city": "CHICAGO",
-        "polling_state": "IL",
-        "polling_zip": "60622",
-        "source_name": "Voting Information Project",
-        "source_official": True,
-        "pollingHours": "",
-        "notes": "",
-    },
-]
+    'passed_address': '900 N Washtenaw, Chicago, IL 60622',
+    'polling_locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
+    'polling_address': '904 N WASHTENAW AVE',
+    'polling_city': 'CHICAGO',
+    'polling_state': 'IL',
+    'polling_zip': '60622',
+    'source_name': 'Voting Information Project',
+    'source_official': True,
+    'pollingHours': '',
+    'notes': ''
+    }]
+
diff --git a/test/test_google/test_google_admin.py b/test/test_google/test_google_admin.py
deleted file mode 100644
index 3f746f635f..0000000000
--- a/test/test_google/test_google_admin.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from parsons.etl.table import Table
-from parsons.google.google_admin import GoogleAdmin
-from test.utils import assert_matching_tables
-from unittest.mock import MagicMock
-import unittest
-
-
-class MockGoogleAdmin(GoogleAdmin):
-    def __init__(self):
-        self.client = MagicMock()
-
-
-class TestGoogleAdmin(unittest.TestCase):
-    mock_aliases = Table(
-        [{"alias": "fakeemail7@fakedomain.com"}, {"alias": "fakeemail8@fakedomain.com"}]
-    )
-    mock_all_group_members = Table([{"email": "fakeemail4@fakedomain.com"}])
-    mock_all_groups = Table(
-        [
-            {
-                "aliases": ["fakeemail7@fakedomain.com", "fakeemail8@fakedomain.com"],
-                "email": "fakeemail4@fakedomain.com",
-                "id": 1,
-            },
-            {"aliases": None, "email": "fakeemail5@fakedomain.com", "id": 2},
-            {"aliases": None, "email": "fakeemail6@fakedomain.com", "id": 3},
-        ]
-    )
-
-    def setUp(self):
-        self.google_admin = MockGoogleAdmin()
-
-    def test_aliases(self):
-        self.google_admin.client.request = MagicMock(
-            return_value=(
-                "",
-                '{"aliases": [{"alias": "fakeemail7@fakedomain.com"},'
-                '{"alias": "fakeemail8@fakedomain'
-                '.com"}]}'.encode(),
-            )
-        )
-        assert_matching_tables(self.google_admin.get_aliases("1"), self.mock_aliases)
-
-    def test_all_group_members(self):
-        self.google_admin.client.request = MagicMock(
-            return_value=(
-                "",
-                '{"members": [{"email": "fakeemail4@fakedomain.com"}]}'.encode(),
-            )
-        )
-        assert_matching_tables(
-            self.google_admin.get_all_group_members("1"), self.mock_all_group_members
-        )
-
-    def test_all_groups(self):
-        self.google_admin.client.request = MagicMock(
-            return_value=(
-                "",
-                '{"groups": [{"aliases": ["fakeemail7@fakedomain.com", "fakeemail8@fakedomain.com"], "e'  # noqa: E501
-                'mail": "fakeemail4@fakedomain.com", "id": 1}, {"email": "fakeemail5@fakedomain.com", "'  # noqa: E501
-                'id": 2}, {"email": "fakeemail6@fakedomain.com", "id": 3}]}'.encode(),
-            )
-        )
-        assert_matching_tables(
-            self.google_admin.get_all_groups({"domain": "fakedomain.com"}),
-            self.mock_all_groups,
-        )
diff --git a/test/test_google/test_google_bigquery.py b/test/test_google/test_google_bigquery.py
index 4fa3f0ac20..0df76ff6f4 100644
--- a/test/test_google/test_google_bigquery.py
+++ b/test/test_google/test_google_bigquery.py
@@ -1,64 +1,33 @@
-import json
 import os
+import unittest
 import unittest.mock as mock
-
 from google.cloud import bigquery
 from google.cloud import exceptions
+from parsons.google.google_bigquery import GoogleBigQuery
+from parsons.etl import Table
 
-from parsons import GoogleBigQuery, Table
-from parsons.google.google_cloud_storage import GoogleCloudStorage
-from test.test_google.test_utilities import FakeCredentialTest
-
-
-class FakeClient:
-    """A Fake Storage Client used for monkey-patching."""
-
-    def __init__(self, project=None):
-        self.project = project
-
-
-class FakeGoogleCloudStorage(GoogleCloudStorage):
-    """A Fake GoogleCloudStorage object used to test setting up credentials."""
-
-    @mock.patch("google.cloud.storage.Client", FakeClient)
-    def __init__(self):
-        super().__init__(None, None)
-
-    def upload_table(
-        self, table, bucket_name, blob_name, data_type="csv", default_acl=None
-    ):
-        pass
 
-    def delete_blob(self, bucket_name, blob_name):
-        pass
-
-
-class TestGoogleBigQuery(FakeCredentialTest):
+class TestGoogleBigQuery(unittest.TestCase):
     def setUp(self):
-        super().setUp()
-        os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = self.cred_path
-        self.tmp_gcs_bucket = "tmp"
-
-    def tearDown(self) -> None:
-        super().tearDown()
-        del os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
+        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'foo'
+        self.tmp_gcs_bucket = 'tmp'
 
     def test_query(self):
-        query_string = "select * from table"
+        query_string = 'select * from table'
 
         # Pass the mock class into our GoogleBigQuery constructor
-        bq = self._build_mock_client_for_querying([{"one": 1, "two": 2}])
+        bq = self._build_mock_client_for_querying([{'one': 1, 'two': 2}])
 
         # Run a query against our parsons GoogleBigQuery class
        result = bq.query(query_string)
 
         # Check our return value
         self.assertEqual(result.num_rows, 1)
-        self.assertEqual(result.columns, ["one", "two"])
-        self.assertEqual(result[0], {"one": 1, "two": 2})
+        self.assertEqual(result.columns, ['one', 'two'])
+        self.assertEqual(result[0], {'one': 1, 'two': 2})
 
     def test_query__no_results(self):
-        query_string = "select * from table"
+        query_string = 'select * from table'
 
         # Pass the mock class into our GoogleBigQuery constructor
         bq = self._build_mock_client_for_querying([])
@@ -71,7 +40,7 @@ def test_query__no_results(self):
 
     def test_copy(self):
         # setup dependencies / inputs
-        tmp_blob_uri = "gs://tmp/file"
+        tmp_blob_uri = 'gs://tmp/file'
 
         # set up object under test
         gcs_client = self._build_mock_cloud_storage_client(tmp_blob_uri)
@@ -79,12 +48,8 @@ def test_copy(self):
         bq = self._build_mock_client_for_copying(table_exists=False)
 
         # call the method being tested
-        bq.copy(
-            tbl,
-            "dataset.table",
-            tmp_gcs_bucket=self.tmp_gcs_bucket,
-            gcs_client=gcs_client,
-        )
+        bq.copy(tbl, 'dataset.table', tmp_gcs_bucket=self.tmp_gcs_bucket,
+                gcs_client=gcs_client)
 
         # check that the method did the right things
         self.assertEqual(gcs_client.upload_table.call_count, 1)
@@ -97,10 +62,9 @@ def test_copy(self):
         load_call_args = bq.client.load_table_from_uri.call_args
         self.assertEqual(load_call_args[0][0], tmp_blob_uri)
 
-        job_config = load_call_args[1]["job_config"]
-        self.assertEqual(
-            job_config.write_disposition, bigquery.WriteDisposition.WRITE_EMPTY
-        )
+        job_config = load_call_args[1]['job_config']
+        self.assertEqual(job_config.write_disposition,
+                         bigquery.WriteDisposition.WRITE_EMPTY)
 
         # make sure we cleaned up the temp file
         self.assertEqual(gcs_client.delete_blob.call_count, 1)
@@ -114,20 +78,14 @@ def test_copy__if_exists_truncate(self):
         bq = self._build_mock_client_for_copying()
 
         # call the method being tested
-        bq.copy(
-            self.default_table,
-            "dataset.table",
-            tmp_gcs_bucket=self.tmp_gcs_bucket,
-            if_exists="truncate",
-            gcs_client=gcs_client,
-        )
+        bq.copy(self.default_table, 'dataset.table', tmp_gcs_bucket=self.tmp_gcs_bucket,
+                if_exists='truncate', gcs_client=gcs_client)
 
         # check that the method did the right things
         call_args = bq.client.load_table_from_uri.call_args
-        job_config = call_args[1]["job_config"]
-        self.assertEqual(
-            job_config.write_disposition, bigquery.WriteDisposition.WRITE_TRUNCATE
-        )
+        job_config = call_args[1]['job_config']
+        self.assertEqual(job_config.write_disposition,
+                         bigquery.WriteDisposition.WRITE_TRUNCATE)
 
         # make sure we cleaned up the temp file
         self.assertEqual(gcs_client.delete_blob.call_count, 1)
@@ -138,20 +96,14 @@ def test_copy__if_exists_append(self):
         bq = self._build_mock_client_for_copying()
 
         # call the method being tested
-        bq.copy(
-            self.default_table,
-            "dataset.table",
-            tmp_gcs_bucket=self.tmp_gcs_bucket,
-            if_exists="append",
-            gcs_client=gcs_client,
-        )
+        bq.copy(self.default_table, 'dataset.table', tmp_gcs_bucket=self.tmp_gcs_bucket,
+                if_exists='append', gcs_client=gcs_client)
 
         # check that the method did the right things
         call_args = bq.client.load_table_from_uri.call_args
-        job_config = call_args[1]["job_config"]
-        self.assertEqual(
-            job_config.write_disposition, bigquery.WriteDisposition.WRITE_APPEND
-        )
+        job_config = call_args[1]['job_config']
+        self.assertEqual(job_config.write_disposition,
+                         bigquery.WriteDisposition.WRITE_APPEND)
 
         # make sure we cleaned up the temp file
         self.assertEqual(gcs_client.delete_blob.call_count, 1)
@@ -162,12 +114,8 @@ def test_copy__if_exists_fail(self):
 
         # call the method being tested
         with self.assertRaises(Exception):
-            bq.copy(
-                self.default_table,
-                "dataset.table",
-                tmp_gcs_bucket=self.tmp_gcs_bucket,
-                gcs_client=self._build_mock_cloud_storage_client(),
-            )
+            bq.copy(self.default_table, 'dataset.table', tmp_gcs_bucket=self.tmp_gcs_bucket,
+                    gcs_client=self._build_mock_cloud_storage_client())
 
     def test_copy__if_exists_drop(self):
         gcs_client = self._build_mock_cloud_storage_client()
@@ -175,13 +123,8 @@ def test_copy__if_exists_drop(self):
         bq = self._build_mock_client_for_copying()
 
         # call the method being tested
-        bq.copy(
-            self.default_table,
-            "dataset.table",
-            tmp_gcs_bucket=self.tmp_gcs_bucket,
-            if_exists="drop",
-            gcs_client=gcs_client,
-        )
+        bq.copy(self.default_table, 'dataset.table', tmp_gcs_bucket=self.tmp_gcs_bucket,
+                if_exists='drop', gcs_client=gcs_client)
 
         # check that we tried to delete the table
         self.assertEqual(bq.client.delete_table.call_count, 1)
@@ -197,33 +140,8 @@ def test_copy__bad_if_exists(self):
 
         # call the method being tested
         with self.assertRaises(ValueError):
-            bq.copy(
-                self.default_table,
-                "dataset.table",
-                tmp_gcs_bucket=self.tmp_gcs_bucket,
-                if_exists="foo",
-                gcs_client=gcs_client,
-            )
-
-    def test_copy__credentials_are_correctly_set(self):
-        tbl = self.default_table
-        bq = self._build_mock_client_for_copying(table_exists=False)
-
-        # Pass in our fake GCS Client.
-        bq.copy(
-            tbl,
-            "dataset.table",
-            tmp_gcs_bucket=self.tmp_gcs_bucket,
-            gcs_client=FakeGoogleCloudStorage(),
-        )
-
-        actual = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
-
-        with open(actual, "r") as factual:
-            with open(self.cred_path, "r") as fexpected:
-                actual_str = factual.read()
-                self.assertEqual(actual_str, fexpected.read())
-                self.assertEqual(self.cred_contents, json.loads(actual_str))
+            bq.copy(self.default_table, 'dataset.table', tmp_gcs_bucket=self.tmp_gcs_bucket,
+                    if_exists='foo', gcs_client=gcs_client)
 
     def _build_mock_client_for_querying(self, results):
         # Create a mock that will play the role of the cursor
@@ -250,21 +168,19 @@ def _build_mock_client_for_querying(self, results):
     def _build_mock_client_for_copying(self, table_exists=True):
         bq_client = mock.MagicMock()
         if not table_exists:
-            bq_client.get_table.side_effect = exceptions.NotFound("not found")
+            bq_client.get_table.side_effect = exceptions.NotFound('not found')
         bq = GoogleBigQuery()
         bq._client = bq_client
         return bq
 
-    def _build_mock_cloud_storage_client(self, tmp_blob_uri=""):
+    def _build_mock_cloud_storage_client(self, tmp_blob_uri=''):
         gcs_client = mock.MagicMock()
         gcs_client.upload_table.return_value = tmp_blob_uri
         return gcs_client
 
     @property
     def default_table(self):
-        return Table(
-            [
-                {"num": 1, "ltr": "a"},
-                {"num": 2, "ltr": "b"},
-            ]
-        )
+        return Table([
+            {'num': 1, 'ltr': 'a'},
+            {'num': 2, 'ltr': 'b'},
+        ])
diff --git a/test/test_google/test_google_cloud_storage.py b/test/test_google/test_google_cloud_storage.py
index a264c701e5..246edd64a1 100644
--- a/test/test_google/test_google_cloud_storage.py
+++ b/test/test_google/test_google_cloud_storage.py
@@ -1,19 +1,19 @@
 import unittest
-from parsons import GoogleCloudStorage, Table
+from parsons.google.google_cloud_storage import GoogleCloudStorage
+from parsons.etl import Table
 from test.utils import assert_matching_tables
 from parsons.utilities import files
 from google.cloud import storage
 import os
 
-TEMP_BUCKET_NAME = "parsons_test"
-TEMP_FILE_NAME = "tmp_file_01.txt"
+TEMP_BUCKET_NAME = 'parsons_test'
+TEMP_FILE_NAME = 'tmp_file_01.txt'
 
-@unittest.skipIf(
-    not os.environ.get("LIVE_TEST"), "Skipping because not running live test"
-)
+@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test')
 class TestGoogleStorageBuckets(unittest.TestCase):
+
     def setUp(self):
         self.cloud = GoogleCloudStorage()
 
@@ -24,7 +24,7 @@ def setUp(self):
         self.cloud.create_bucket(TEMP_BUCKET_NAME)
 
         # Upload a file
-        tmp_file_path = files.string_to_temp_file("A little string", suffix=".txt")
+        tmp_file_path = files.string_to_temp_file('A little string', suffix='.txt')
         self.cloud.put_blob(TEMP_BUCKET_NAME, TEMP_FILE_NAME, tmp_file_path)
 
     def test_list_buckets(self):
@@ -41,14 +41,12 @@ def test_bucket_exists(self):
         self.assertTrue(self.cloud.bucket_exists(TEMP_BUCKET_NAME))
 
         # Assert doesn't find a bucket that doesn't exist
-        self.assertFalse(self.cloud.bucket_exists("NOT_A_REAL_BUCKET"))
+        self.assertFalse(self.cloud.bucket_exists('NOT_A_REAL_BUCKET'))
 
     def test_get_bucket(self):
 
         # Assert that a bucket object is returned
-        self.assertIsInstance(
-            self.cloud.get_bucket(TEMP_BUCKET_NAME), storage.bucket.Bucket
-        )
+        self.assertIsInstance(self.cloud.get_bucket(TEMP_BUCKET_NAME), storage.bucket.Bucket)
 
     def test_create_bucket(self):
 
@@ -59,9 +57,9 @@ def test_create_bucket(self):
     def test_delete_bucket(self):
 
         # Create another bucket, delete it and make sure it doesn't exist
-        self.cloud.create_bucket(TEMP_BUCKET_NAME + "_2")
-        self.cloud.delete_bucket(TEMP_BUCKET_NAME + "_2")
-        self.assertFalse(self.cloud.bucket_exists(TEMP_BUCKET_NAME + "_2"))
+        self.cloud.create_bucket(TEMP_BUCKET_NAME + '_2')
+        self.cloud.delete_bucket(TEMP_BUCKET_NAME + '_2')
+        self.assertFalse(self.cloud.bucket_exists(TEMP_BUCKET_NAME + '_2'))
 
     def test_list_blobs(self):
 
@@ -79,7 +77,7 @@ def test_blob_exists(self):
         self.assertTrue(self.cloud.blob_exists(TEMP_BUCKET_NAME, TEMP_FILE_NAME))
 
         # Assert that it thinks that a non-existent blob doesn't exist
-        self.assertFalse(self.cloud.blob_exists(TEMP_BUCKET_NAME, "FAKE_BLOB"))
+        self.assertFalse(self.cloud.blob_exists(TEMP_BUCKET_NAME, 'FAKE_BLOB'))
 
     def test_put_blob(self):
 
@@ -90,22 +88,21 @@ def test_get_blob(self):
 
         # Assert that a blob object is returned
         self.assertIsInstance(
-            self.cloud.get_blob(TEMP_BUCKET_NAME, TEMP_FILE_NAME), storage.blob.Blob
-        )
+            self.cloud.get_blob(TEMP_BUCKET_NAME, TEMP_FILE_NAME), storage.blob.Blob)
 
     def test_download_blob(self):
 
         # Download blob and ensure that it is the expected file
         path = self.cloud.download_blob(TEMP_BUCKET_NAME, TEMP_FILE_NAME)
-        with open(path, "r") as f:
-            self.assertEqual(f.read(), "A little string")
+        with open(path, 'r') as f:
+            self.assertEqual(f.read(), 'A little string')
 
     def test_delete_blob(self):
 
-        file_name = "delete_me.txt"
+        file_name = 'delete_me.txt'
 
         # Upload a file
-        tmp_file_path = files.string_to_temp_file("A little string", suffix=".txt")
+        tmp_file_path = files.string_to_temp_file('A little string', suffix='.txt')
         self.cloud.put_blob(TEMP_BUCKET_NAME, file_name, tmp_file_path)
 
         # Check that it was deleted.
@@ -114,8 +111,8 @@ def test_delete_blob(self):
 
     def test_get_url(self):
 
-        file_name = "delete_me.csv"
-        input_tbl = Table([["a"], ["1"]])
+        file_name = 'delete_me.csv'
+        input_tbl = Table([['a'], ['1']])
         self.cloud.upload_table(input_tbl, TEMP_BUCKET_NAME, file_name)
         url = self.cloud.get_url(TEMP_BUCKET_NAME, file_name)
         download_tbl = Table.from_csv(url)
diff --git a/test/test_google/test_google_sheets.py b/test/test_google/test_google_sheets.py
index 7df50ddf94..d85e633a7f 100644
--- a/test/test_google/test_google_sheets.py
+++ b/test/test_google/test_google_sheets.py
@@ -2,38 +2,32 @@
 import gspread
 import os
 
-from parsons import GoogleSheets, Table
+from parsons.google.google_sheets import GoogleSheets
+from parsons.etl.table import Table
 from test.utils import assert_matching_tables
 
 
-@unittest.skipIf(
-    not os.environ.get("LIVE_TEST"), "Skipping because not running live test"
-)
+@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test')
 class TestGoogleSheets(unittest.TestCase):
+
     def setUp(self):
 
         self.google_sheets = GoogleSheets()
 
-        self.spreadsheet_id = self.google_sheets.create_spreadsheet("parsons_test_01")
-        self.test_table = Table(
-            [
-                {"first": "Bob", "last": "Smith"},
-                {"first": "Sue", "last": "Doe"},
-            ]
-        )
+        self.spreadsheet_id = self.google_sheets.create_spreadsheet('parsons_test_01')
+        self.test_table = Table([
+            {'first': 'Bob', 'last': 'Smith'},
+            {'first': 'Sue', 'last': 'Doe'},
+        ])
         self.google_sheets.overwrite_sheet(self.spreadsheet_id, self.test_table)
 
         self.second_sheet_title = "2nd"
         self.google_sheets.add_sheet(self.spreadsheet_id, self.second_sheet_title)
-        self.second_test_table = Table(
-            [
-                {"city": "San Francisco", "state": "SF"},
-                {"city": "Chicago", "state": "IL"},
-            ]
-        )
-        self.google_sheets.overwrite_sheet(
-            self.spreadsheet_id, self.second_test_table, 1
-        )
+        self.second_test_table = Table([
+            {'city': 'San Francisco', 'state': 'SF'},
+            {'city': 'Chicago', 'state': 'IL'},
+        ])
+        self.google_sheets.overwrite_sheet(self.spreadsheet_id, self.second_test_table, 1)
 
     def tearDown(self):
         # self.google_sheets.delete_spreadsheet(self.spreadsheet_id)
@@ -41,24 +35,18 @@ def tearDown(self):
 
     def test_read_worksheet(self):
         # This is the spreadsheet called "Legislators 2017 (Test sheet for Parsons)"
-        table = self.google_sheets.get_worksheet(
-            "1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8"
-        )
+        table = self.google_sheets.get_worksheet('1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8')
         self.assertEqual(541, table.num_rows)
 
     def test_read_sheet(self):
         # Deprecated in Parsons v0.14
         # This is the spreadsheet called "Legislators 2017 (Test sheet for Parsons)"
-        table = self.google_sheets.read_sheet(
-            "1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8"
-        )
+        table = self.google_sheets.read_sheet('1Y_pZxz-8JZ9QBdq1pXuIk2js_VXeymOUoZhUp1JVEg8')
         self.assertEqual(541, table.num_rows)
 
     def test_read_nonexistent_worksheet(self):
-        self.assertRaises(
-            gspread.exceptions.APIError, self.google_sheets.read_sheet, "abc123"
-        )
+        self.assertRaises(gspread.exceptions.APIError, self.google_sheets.read_sheet, 'abc123')
 
     def test_create_spreadsheet(self):
         # Created as part of setUp
@@ -67,41 +55,31 @@ def test_add_sheet(self):
         # Sheet added as part of setUp
         # Also tests get_sheet_index_with_title
-        idx = self.google_sheets.get_worksheet_index(
-            self.spreadsheet_id, self.second_sheet_title
-        )
+        idx = self.google_sheets.get_worksheet_index(self.spreadsheet_id, self.second_sheet_title)
         self.assertEqual(1, idx)
 
     def test_get_sheet_index_with_bogus_title(self):
         self.assertRaises(
             ValueError,
-            self.google_sheets.get_worksheet_index,
-            self.spreadsheet_id,
-            "abc123",
-        )
+            self.google_sheets.get_worksheet_index, self.spreadsheet_id, 'abc123'
+        )
 
     def test_read_worksheet_with_title(self):
-        table = self.google_sheets.get_worksheet(
-            self.spreadsheet_id, self.second_sheet_title
-        )
+        table = self.google_sheets.get_worksheet(self.spreadsheet_id, self.second_sheet_title)
         self.assertEqual(self.second_test_table.columns, table.columns)
 
     def test_append_to_spreadsheet(self):
         # BROKEN TEST!
-        append_table = Table(
-            [
-                {"first": "Jim", "last": "Mitchell"},
-                {"first": "Lucy", "last": "Simpson"},
-            ]
-        )
+        append_table = Table([
+            {'first': 'Jim', 'last': 'Mitchell'},
+            {'first': 'Lucy', 'last': 'Simpson'},
+        ])
         self.google_sheets.append_to_sheet(self.spreadsheet_id, append_table)
         result_table = self.google_sheets.read_sheet(self.spreadsheet_id)
 
         self.assertEqual(append_table.columns, result_table.columns)
         # We should now have rows from both tables
-        self.assertEqual(
-            self.test_table.num_rows + append_table.num_rows, result_table.num_rows
-        )
+        self.assertEqual(self.test_table.num_rows + append_table.num_rows, result_table.num_rows)
 
         # First check that we didn't muck with the original data
         for i in range(self.test_table.num_rows):
@@ -110,45 +88,38 @@ def test_append_to_spreadsheet(self):
 
         # Then check that we appended the data properly
         for i in range(append_table.num_rows):
-            self.assertEqual(
-                append_table.data[i], result_table.data[orig_row_count + i]
-            )
+            self.assertEqual(append_table.data[i], result_table.data[orig_row_count+i])
 
         # Test that we can append to an empty sheet
-        self.google_sheets.add_sheet(self.spreadsheet_id, "Sheet3")
+        self.google_sheets.add_sheet(self.spreadsheet_id, 'Sheet3')
         self.google_sheets.append_to_sheet(self.spreadsheet_id, append_table)
 
     def test_append_user_entered_to_spreadsheet(self):
         # Testing whether we can insert formulas with user_entered_value
 
-        self.google_sheets.add_sheet(self.spreadsheet_id, "Sheet3")
+        self.google_sheets.add_sheet(self.spreadsheet_id, 'Sheet3')
 
-        append_table = Table(
-            [
-                {"col1": 3, "col2": 9, "col3": "=A2*B2"},
-                {"col1": "Buda", "col2": "Pest", "col3": "=A3&LOWER(B3)"},
-            ]
-        )
+        append_table = Table([
+            {'col1': 3, 'col2': 9, 'col3': '=A2*B2'},
+            {'col1': 'Buda', 'col2': 'Pest', 'col3': '=A3&LOWER(B3)'},
+        ])
         self.google_sheets.append_to_sheet(
-            self.spreadsheet_id, append_table, 2, user_entered_value=True
-        )
+            self.spreadsheet_id, append_table, 2, user_entered_value=True)
         result_table = self.google_sheets.read_sheet(self.spreadsheet_id, 2)
 
         # Get the values from col3 which has fomulas
-        formula_vals = [row["col3"] for row in result_table]
+        formula_vals = [row['col3'] for row in result_table]
 
         # Test that the value is what's expected from each formula
-        self.assertEqual(formula_vals[0], "27")
-        self.assertEqual(formula_vals[1], "Budapest")
+        self.assertEqual(formula_vals[0], '27')
+        self.assertEqual(formula_vals[1], 'Budapest')
 
     def test_overwrite_spreadsheet(self):
-        new_table = Table(
-            [
-                {"city": "San Francisco", "state": "CA"},
-                {"city": "Miami", "state": "FL"},
-                {"city": "San Antonio", "state": "TX"},
-            ]
-        )
+        new_table = Table([
+            {'city': 'San Francisco', 'state': 'CA'},
+            {'city': 'Miami', 'state': 'FL'},
+            {'city': 'San Antonio', 'state': 'TX'},
+        ])
         self.google_sheets.overwrite_sheet(self.spreadsheet_id, new_table)
         result_table = self.google_sheets.read_sheet(self.spreadsheet_id)
@@ -158,9 +129,6 @@ def test_share_spreadsheet(self):
         # Test that sharing of spreadsheet works as intended.
         self.google_sheets.share_spreadsheet(
-            self.spreadsheet_id, "bob@bob.com", role="reader", notify=True
-        )
-        permissions = self.google_sheets.get_spreadsheet_permissions(
-            self.spreadsheet_id
-        )
-        self.assertIn("bob@bob.com", permissions["emailAddress"])
+            self.spreadsheet_id, 'bob@bob.com', role='reader', notify=True)
+        permissions = self.google_sheets.get_spreadsheet_permissions(self.spreadsheet_id)
+        self.assertIn('bob@bob.com', permissions['emailAddress'])
diff --git a/test/test_google/test_googlecivic.py b/test/test_google/test_googlecivic.py
index ed154ec2ec..4e2c501bae 100644
--- a/test/test_google/test_googlecivic.py
+++ b/test/test_google/test_googlecivic.py
@@ -1,49 +1,47 @@
 import unittest
 import requests_mock
-from parsons import Table, GoogleCivic
+from parsons.etl import Table
+from parsons.google.google_civic import GoogleCivic
 from googlecivic_responses import elections_resp, voterinfo_resp, polling_data
 from test.utils import assert_matching_tables
 
 
 class TestGoogleCivic(unittest.TestCase):
+
     def setUp(self):
 
-        self.gc = GoogleCivic(api_key="FAKEKEY")
+        self.gc = GoogleCivic(api_key='FAKEKEY')
 
     @requests_mock.Mocker()
     def test_get_elections(self, m):
 
-        m.get(self.gc.uri + "elections", json=elections_resp)
+        m.get(self.gc.uri + 'elections', json=elections_resp)
 
-        expected_tbl = Table(elections_resp["elections"])
+        expected_tbl = Table(elections_resp['elections'])
 
         assert_matching_tables(self.gc.get_elections(), expected_tbl)
 
     @requests_mock.Mocker()
     def test_get_poll_location(self, m):
 
-        m.get(self.gc.uri + "voterinfo", json=voterinfo_resp)
+        m.get(self.gc.uri + 'voterinfo', json=voterinfo_resp)
 
-        expected_tbl = Table(voterinfo_resp["pollingLocations"])
+        expected_tbl = Table(voterinfo_resp['pollingLocations'])
 
-        tbl = self.gc.get_polling_location(2000, "900 N Washtenaw, Chicago, IL 60622")
+        tbl = self.gc.get_polling_location(2000, '900 N Washtenaw, Chicago, IL 60622')
 
         assert_matching_tables(tbl, expected_tbl)
 
     @requests_mock.Mocker()
     def test_get_poll_locations(self, m):
 
-        m.get(self.gc.uri + "voterinfo", json=voterinfo_resp)
+        m.get(self.gc.uri + 'voterinfo', json=voterinfo_resp)
 
         expected_tbl = Table(polling_data)
 
-        address_tbl = Table(
-            [
-                ["address"],
-                ["900 N Washtenaw, Chicago, IL 60622"],
-                ["900 N Washtenaw, Chicago, IL 60622"],
-            ]
-        )
+        address_tbl = Table([['address'],
+                             ['900 N Washtenaw, Chicago, IL 60622'],
+                             ['900 N Washtenaw, Chicago, IL 60622']])
 
         tbl = self.gc.get_polling_locations(2000, address_tbl)
diff --git a/test/test_google/test_utilities.py b/test/test_google/test_utilities.py
deleted file mode 100644
index 618b28dba5..0000000000
--- a/test/test_google/test_utilities.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import json
-import unittest
-import os
-import tempfile
-
-from parsons.google import utitities as util
-
-
-class FakeCredentialTest(unittest.TestCase):
-    def setUp(self) -> None:
-        self.dir = tempfile.TemporaryDirectory()
-        self.cred_path = os.path.join(self.dir.name, "mycred.json")
-        self.cred_contents = {
-            "client_id": "foobar.apps.googleusercontent.com",
-            "client_secret": str(hash("foobar")),
-            "quota_project_id": "project-id",
-            "refresh_token": str(hash("foobarfoobar")),
-            "type": "authorized_user",
-        }
-        with open(self.cred_path, "w") as f:
-            json.dump(self.cred_contents, f)
-
-    def tearDown(self) -> None:
-        self.dir.cleanup()
-
-
-class TestSetupGoogleApplicationCredentials(FakeCredentialTest):
-    TEST_ENV_NAME = "DUMMY_APP_CREDS"
-
-    def tearDown(self) -> None:
-        super().tearDown()
-        del os.environ[self.TEST_ENV_NAME]
-
-    def test_noop_if_env_already_set(self):
-        os.environ[self.TEST_ENV_NAME] = self.cred_path
-        util.setup_google_application_credentials(None, self.TEST_ENV_NAME)
-        self.assertEqual(os.environ[self.TEST_ENV_NAME], self.cred_path)
-
-    def test_accepts_dictionary(self):
-        util.setup_google_application_credentials(
-            self.cred_contents, self.TEST_ENV_NAME
-        )
-        actual = os.environ[self.TEST_ENV_NAME]
-        self.assertTrue(os.path.exists(actual))
-        with open(actual, "r") as f:
-            self.assertEqual(json.load(f), self.cred_contents)
-
-    def test_accepts_string(self):
-        cred_str = json.dumps(self.cred_contents)
-        util.setup_google_application_credentials(cred_str, self.TEST_ENV_NAME)
-        actual = os.environ[self.TEST_ENV_NAME]
-        self.assertTrue(os.path.exists(actual))
-        with open(actual, "r") as f:
-            self.assertEqual(json.load(f), self.cred_contents)
-
-    def test_accepts_file_path(self):
-        util.setup_google_application_credentials(self.cred_path, self.TEST_ENV_NAME)
-        actual = os.environ[self.TEST_ENV_NAME]
-        self.assertTrue(os.path.exists(actual))
-        with open(actual, "r") as f:
-            self.assertEqual(json.load(f), self.cred_contents)
-
-    def test_credentials_are_valid_after_double_call(self):
-        # write creds to tmp file...
-        util.setup_google_application_credentials(
-            self.cred_contents, self.TEST_ENV_NAME
-        )
-        fst = os.environ[self.TEST_ENV_NAME]
-
-        # repeat w/ default args...
-        util.setup_google_application_credentials(None, self.TEST_ENV_NAME)
-        snd = os.environ[self.TEST_ENV_NAME]
-
-        with open(fst, "r") as ffst:
-            with open(snd, "r") as fsnd:
-                actual = fsnd.read()
-                self.assertEqual(self.cred_contents, json.loads(actual))
-                self.assertEqual(ffst.read(), actual)
diff --git a/test/test_hustle/expected_json.py b/test/test_hustle/expected_json.py
index b5376b9911..b1c9b17194 100644
--- a/test/test_hustle/expected_json.py
+++ b/test/test_hustle/expected_json.py
@@ -1,302 +1,278 @@
+
 # Expected auth token response json
 auth_token = {
     "access_token": "MYFAKETOKEN",
     "scope": "read:account write:account",
     "expires_in": 7200,
-    "token_type": "Bearer",
+    "token_type": "Bearer"
 }
 
 organizations = {
-    "items": [
-        {
-            "id": "LePEoKzD3",
-            "type": "Organization",
-            "name": "Org A",
-            "createdAt": "2018-03-01T17:05:15.386Z",
-        },
-        {
-            "id": "lnCrK2fSD",
-            "type": "Organization",
-            "name": "Org B",
-            "createdAt": "2019-01-17T19:47:18.133Z",
-        },
-        {
-            "id": "raqlV2HGd",
-            "type": "Organization",
-            "name": "Org C",
-            "createdAt": "2019-01-17T19:49:55.249Z",
-        },
-    ],
-    "pagination": {"cursor": "WzEsMTAwMF", "hasNextPage": False, "total": 3},
+    'items': [{
+        'id': 'LePEoKzD3',
+        'type': 'Organization',
+        'name': 'Org A',
+        'createdAt': '2018-03-01T17:05:15.386Z'
+    }, {
+        'id': 'lnCrK2fSD',
+        'type': 'Organization',
+        'name': 'Org B',
+        'createdAt': '2019-01-17T19:47:18.133Z'
+    }, {
+        'id': 'raqlV2HGd',
+        'type': 'Organization',
+        'name': 'Org C',
+        'createdAt': '2019-01-17T19:49:55.249Z'
+    }],
+    'pagination': {
+        'cursor': 'WzEsMTAwMF',
+        'hasNextPage': False,
+        'total': 3
+    }
 }
 
 organization = {
-    "id": "LePEoKzD3",
-    "type": "Organization",
-    "name": "Org A",
-    "createdAt": "2018-03-01T17:05:15.386Z",
+    'id': 'LePEoKzD3',
+    'type': 'Organization',
+    'name': 'Org A',
+    'createdAt': '2018-03-01T17:05:15.386Z'
}
 
 groups = {
-    "items": [
-        {
-            "id": "Qqp6o90Si",
-            "type": "Group",
-            "name": "Group A",
-            "countryCode": "US",
-            "active": True,
-            "location": "New York, NY, USA",
-            "organizationId": "LePEoKzD3",
-            "preferredAreaCodes": [],
-            "timezone": "America/New_York",
-            "createdAt": "2018-08-02T14:50:39.353Z",
-        },
-        {
-            "id": "ygEXcRLEM",
-            "type": "Group",
-            "name": "Group A",
-            "countryCode": "US",
-            "active": True,
-            "location": "New York, NY, USA",
-            "organizationId": "LePEoKzD3",
-            "preferredAreaCodes": [],
-            "timezone": "America/New_York",
-            "createdAt": "2018-12-19T18:43:30.253Z",
-        },
-        {
-            "id": "svwi7gGV7",
-            "type": "Group",
-            "name": "Group A",
-            "countryCode": "US",
-            "active": True,
-            "location": "New York, NY, USA",
-            "organizationId": "LePEoKzD3",
-            "preferredAreaCodes": [],
-            "timezone": "America/New_York",
-            "createdAt": "2018-12-21T20:39:05.178Z",
-        },
-        {
-            "id": "XtXPxbYGv",
-            "type": "Group",
-            "name": "Group A",
-            "description": "A Group.",
-            "countryCode": "US",
-            "active": True,
-            "location": "New York, NY, USA",
-            "organizationId": "LePEoKzD3",
-            "preferredAreaCodes": [],
-            "timezone": "America/New_York",
-            "createdAt": "2018-10-11T16:36:52.245Z",
-        },
-    ],
-    "pagination": {"cursor": "WzEsMTAwMF", "hasNextPage": False, "total": 233},
+    'items': [{
+        'id': 'Qqp6o90Si',
+        'type': 'Group',
+        'name': 'Group A',
+        'countryCode': 'US',
+        'active': True,
+        'location': 'New York, NY, USA',
+        'organizationId': 'LePEoKzD3',
+        'preferredAreaCodes': [],
+        'timezone': 'America/New_York',
+        'createdAt': '2018-08-02T14:50:39.353Z'
+    }, {
+        'id': 'ygEXcRLEM',
+        'type': 'Group',
+        'name': 'Group A',
+        'countryCode': 'US',
+        'active': True,
+        'location': 'New York, NY, USA',
+        'organizationId': 'LePEoKzD3',
+        'preferredAreaCodes': [],
+        'timezone': 'America/New_York',
+        'createdAt': '2018-12-19T18:43:30.253Z'
+    }, {
+        'id': 'svwi7gGV7',
+        'type': 'Group',
+        'name': 'Group A',
+        'countryCode': 'US',
+        'active': True,
+        'location': 'New York, NY, USA',
+        'organizationId': 'LePEoKzD3',
+        'preferredAreaCodes': [],
+        'timezone': 'America/New_York',
+        'createdAt': '2018-12-21T20:39:05.178Z'
+    }, {
+        'id': 'XtXPxbYGv',
+        'type': 'Group',
+        'name': 'Group A',
+        'description': 'A Group.',
+        'countryCode': 'US',
+        'active': True,
+        'location': 'New York, NY, USA',
+        'organizationId': 'LePEoKzD3',
+        'preferredAreaCodes': [],
+        'timezone': 'America/New_York',
+        'createdAt': '2018-10-11T16:36:52.245Z'
+    }],
+    'pagination': {
+        'cursor': 'WzEsMTAwMF',
+        'hasNextPage': False,
+        'total': 233
+    }
 }
 
 group = {
-    "id": "zajXdqtzRt",
-    "type": "Group",
-    "name": "Group A",
-    "countryCode": "US",
-    "active": True,
-    "location": "Colorado, USA",
-    "organizationId": "lnCrK2fSD",
-    "preferredAreaCodes": [],
-    "timezone": "America/Denver",
-    "createdAt": "2019-08-27T13:52:41.986Z",
+    'id': 'zajXdqtzRt',
+    'type': 'Group',
+    'name': 'Group A',
+    'countryCode': 'US',
+    'active': True,
+    'location': 'Colorado, USA',
+    'organizationId': 'lnCrK2fSD',
+    'preferredAreaCodes': [],
+    'timezone': 'America/Denver',
+    'createdAt': '2019-08-27T13:52:41.986Z'
 }
 
 lead = {
-    "id": "A6ebDlAtqB",
-    "type": "Lead",
-    "customFields": {},
-    "globalOptedOut": False,
-    "groupIds": ["cMCH0hxwGt"],
-    "firstName": "Barack",
-    "lastName": "Obama",
-    "organizationId": "LePEoKzD3Z",
-    "phoneNumber": "+15126993336",
-    "tagIds": [],
-    "createdAt": "2019-09-18T03:53:21.381Z",
+    'id': 'A6ebDlAtqB',
+    'type': 'Lead',
+    'customFields': {},
+    'globalOptedOut': False,
+    'groupIds': ['cMCH0hxwGt'],
+    'firstName': 'Barack',
+    'lastName': 'Obama',
+    'organizationId': 'LePEoKzD3Z',
+    'phoneNumber': '+15126993336',
+    'tagIds': [],
+    'createdAt': '2019-09-18T03:53:21.381Z'
 }
 
 leads_tbl_01 = {
-    "id": "yK5jo2tlms",
-    "type": "Lead",
-    "customFields": {"address": "123 Main Street"},
-    "globalOptedOut": False,
-    "groupIds": ["cMCH0hxwGt"],
-    "firstName": "Lyndon",
-    "lastName": "Johnson",
-    "organizationId": "LePEoKzD3Z",
-    "phoneNumber": "+14435705355",
-    "tagIds": [],
-    "createdAt": "2019-09-20T22:15:46.706Z",
+    'id': 'yK5jo2tlms',
+    'type': 'Lead',
+    'customFields': {
+        'address': '123 Main Street'
+    },
+    'globalOptedOut': False,
+    'groupIds': ['cMCH0hxwGt'],
+    'firstName': 'Lyndon',
+    'lastName': 'Johnson',
+    'organizationId': 'LePEoKzD3Z',
+    'phoneNumber': '+14435705355',
+    'tagIds': [],
+    'createdAt': '2019-09-20T22:15:46.706Z'
 }
 
 leads_tbl_02 = {
-    "id": "t18JdlHW7r",
-    "type": "Lead",
-    "customFields": {"address": "124 Main Street"},
-    "globalOptedOut": False,
-    "groupIds": ["cMCH0hxwGt"],
-    "firstName": "Ann",
-    "lastName": "Richards",
-    "organizationId": "LePEoKzD3Z",
-    "phoneNumber": "+14435705354",
-    "tagIds": [],
-    "createdAt": "2019-09-20T22:15:48.033Z",
-}
-
-created_leads = [
-    {
-        "id": "yK5jo2tlms",
-        "type": "Lead",
-        "customFields": {"address": "123 Main Street"},
-        "globalOptedOut": False,
-        "groupIds": ["cMCH0hxwGt"],
-        "firstName": "Lyndon",
-        "lastName": "Johnson",
-        "organizationId": "LePEoKzD3Z",
-        "phoneNumber": "+14435705355",
-        "tagIds": [],
-        "createdAt": "2019-09-20T22:15:46.706Z",
-    },
-    {
-        "id": "t18JdlHW7r",
-        "type": "Lead",
-        "customFields": {"address": "124 Main Street"},
-        "globalOptedOut": False,
-        "groupIds": ["cMCH0hxwGt"],
-        "firstName": "Ann",
-        "lastName": "Richards",
-        "organizationId": "LePEoKzD3Z",
-        "phoneNumber": "+14435705354",
-        "tagIds": [],
-        "createdAt": "2019-09-20T22:15:48.033Z",
+    'id': 't18JdlHW7r',
+    'type': 'Lead',
+    'customFields': {
+        'address': '124 Main Street'
     },
-]
+    'globalOptedOut': False,
+    'groupIds': ['cMCH0hxwGt'],
+    'firstName': 'Ann',
+    'lastName': 'Richards',
+    'organizationId': 'LePEoKzD3Z',
+    'phoneNumber': '+14435705354',
+    'tagIds': [],
+    'createdAt': '2019-09-20T22:15:48.033Z'
+}
 
 leads = {
-    "items": [
-        {
-            "id": "wqy78hlz2",
-            "type": "Lead",
-            "customFields": {},
-            "globalOptedOut": False,
-            "groupIds": ["cMCH0hxwGt"],
-            "firstName": "Elizabeth",
-            "lastName": "Warren",
-            "organizationId": "LePEoKzD3Z",
-            "phoneNumber": "+14435705355",
-            "tagIds": [],
-            "createdAt": "2019-09-17T21:25:51.442Z",
-        },
-        {
-            "id": "A6ebDlAtqB",
-            "type": "Lead",
-            "customFields": {},
-            "globalOptedOut": False,
-            "groupIds": ["cMCH0hxwGt"],
-            "firstName": "Barack",
-            "lastName": "Obama",
-            "organizationId": "LePEoKzD3Z",
-            "phoneNumber": "+15126993336",
-            "tagIds": [],
-            "createdAt": "2019-09-18T03:53:21.381Z",
-        },
-    ],
-    "pagination": {"cursor": "WzEsMTAwMF", "hasNextPage": False, "total": 2},
+    'items': [{
+        'id': 'wqy78hlz2',
+        'type': 'Lead',
+        'customFields': {},
+        'globalOptedOut': False,
+        'groupIds': ['cMCH0hxwGt'],
+        'firstName': 'Elizabeth',
+        'lastName': 'Warren',
+        'organizationId': 'LePEoKzD3Z',
+        'phoneNumber': '+14435705355',
+        'tagIds': [],
+        'createdAt': '2019-09-17T21:25:51.442Z'
+    }, {
+        'id': 'A6ebDlAtqB',
+        'type': 'Lead',
+        'customFields': {},
+        'globalOptedOut': False,
+        'groupIds': ['cMCH0hxwGt'],
+        'firstName': 'Barack',
+        'lastName': 'Obama',
+        'organizationId': 'LePEoKzD3Z',
+        'phoneNumber': '+15126993336',
+        'tagIds': [],
+        'createdAt': '2019-09-18T03:53:21.381Z'
+    }],
+    'pagination': {
+        'cursor': 'WzEsMTAwMF',
+        'hasNextPage': False,
+        'total': 2
+    }
 }
 
 updated_lead = {
-    "id": "wqy78hlz2T",
-    "type": "Lead",
-    "customFields": {},
-    "globalOptedOut": False,
-    "groupIds": ["cMCH0hxwGt"],
-    "firstName": "Bob",
-    "lastName": "Burchard",
-    "organizationId": "LePEoKzD3Z",
-    "phoneNumber": "+14435705356",
-    "tagIds": [],
-    "createdAt": "2019-09-17T21:25:51.442Z",
+    'id': 'wqy78hlz2T',
+    'type': 'Lead',
+    'customFields': {},
+    'globalOptedOut': False,
+    'groupIds': ['cMCH0hxwGt'],
+    'firstName': 'Bob',
+    'lastName': 'Burchard',
+    'organizationId': 'LePEoKzD3Z',
+    'phoneNumber': '+14435705356',
+    'tagIds': [],
+    'createdAt': '2019-09-17T21:25:51.442Z'
 }
 
 tags = {
-    "items": [
-        {
-            "id": "zEx5rjbg5",
-            "type": "Tag",
-            "name": "2018_09_14_Hustle",
-            "organizationId": "LePEoKzD3",
-            "createdAt": "2018-09-17T18:59:09.903Z",
-        },
-        {
-            "id": "lfGhS5weS",
-            "type": "Tag",
-            "name": "2018_09_19_Hustle",
-            "organizationId": "LePEoKzD3",
-            "createdAt": "2018-09-18T20:53:16.096Z",
-        },
-        {
-            "id": "a8DV7DC5E",
-            "type": "Tag",
-            "name": "2018_09_21_Hustle",
-            "organizationId": "LePEoKzD3",
-            "createdAt": "2018-09-25T16:28:32.868Z",
-        },
-        {
-            "id": "6bHjStnDD",
-            "type": "Tag",
-            "name": "wrong_number",
-            "organizationId": "LePEoKzD3",
-            "createdAt": "2018-03-01T17:08:44.296Z",
-        },
-    ],
-    "pagination": {"cursor": "WzEsMTAwMF0", "hasNextPage": False, "total": 4},
+    'items': [{
+        'id': 'zEx5rjbg5',
+        'type': 'Tag',
+        'name': '2018_09_14_Hustle',
+        'organizationId': 'LePEoKzD3',
+        'createdAt': '2018-09-17T18:59:09.903Z'
+    }, {
+        'id': 'lfGhS5weS',
+        'type': 'Tag',
+        'name': '2018_09_19_Hustle',
+        'organizationId': 'LePEoKzD3',
+        'createdAt': '2018-09-18T20:53:16.096Z'
+    }, {
+        'id': 'a8DV7DC5E',
+        'type': 'Tag',
+        'name': '2018_09_21_Hustle',
+        'organizationId': 'LePEoKzD3',
+        'createdAt': '2018-09-25T16:28:32.868Z'
+    }, {
+        'id': '6bHjStnDD',
+        'type': 'Tag',
+        'name': 'wrong_number',
+        'organizationId': 'LePEoKzD3',
+        'createdAt': '2018-03-01T17:08:44.296Z'
+    }],
+    'pagination': {
+        'cursor': 'WzEsMTAwMF0',
+        'hasNextPage': False,
+        'total': 4
+    }
 }
 
 tag = {
-    "id": "zEx5rjbg",
-    "type": "Tag",
-    "name": "2018_09_14_Hustle",
-    "organizationId": "LePEoKzD3",
-    "createdAt": "2018-09-17T18:59:09.903Z",
+    'id': 'zEx5rjbg',
+    'type': 'Tag',
+    'name': '2018_09_14_Hustle',
+    'organizationId': 'LePEoKzD3',
+    'createdAt': '2018-09-17T18:59:09.903Z'
 }
 
 agents = {
-    "items": [
-        {
-            "id": "CrJUBI1CF",
-            "type": "Agent",
-            "name": "Bob",
-            "fullName": "Bob Smith",
-            "phoneNumber": "+12032498764",
-            "organizationId": "LePEoKzD3",
-            "groupId": "Qqp6o90Si",
-            "createdAt": "2018-08-02T14:50:48.519Z",
-        },
-        {
-            "id": "vnBImjcoen",
-            "type": "Agent",
-            "name": "Angela",
-            "fullName": "Jones",
-            "phoneNumber": "+14156529028",
-            "organizationId": "LePEoKzD3",
-            "groupId": "Qqp6o90Si",
-            "createdAt": "2018-08-02T14:50:54.254Z",
-        },
-    ],
-    "pagination": {"cursor": "WzEsMTAwMF0", "hasNextPage": False, "total": 2},
+    'items': [{
+        'id': 'CrJUBI1CF',
+        'type': 'Agent',
+        'name': 'Bob',
+        'fullName': 'Bob Smith',
+        'phoneNumber': '+12032498764',
+        'organizationId': 'LePEoKzD3',
+        'groupId': 'Qqp6o90Si',
+        'createdAt': '2018-08-02T14:50:48.519Z'
+    }, {
+        'id': 'vnBImjcoen',
+        'type': 'Agent',
+        'name': 'Angela',
+        'fullName': 'Jones',
+        'phoneNumber': '+14156529028',
+        'organizationId': 'LePEoKzD3',
+        'groupId': 'Qqp6o90Si',
+        'createdAt': '2018-08-02T14:50:54.254Z'
+    }],
+    'pagination': {
+        'cursor': 'WzEsMTAwMF0',
+        'hasNextPage': False,
+        'total': 2
+    }
 }
 
 agent = {
-    "id": "CrJUBI1CF",
-    "type": "Agent",
-    "name": "Angela",
-    "fullName": "Angela Jones",
-    "phoneNumber": "+12032498764",
-    "organizationId": "LePEoKzD3",
-    "groupId": "Qqp6o90Si",
-    "createdAt": "2018-08-02T14:50:48.519Z",
+    'id': 'CrJUBI1CF',
+    'type': 'Agent',
+    'name': 'Angela',
+    'fullName': 'Angela Jones',
+    'phoneNumber': '+12032498764',
+    'organizationId': 'LePEoKzD3',
+    'groupId': 'Qqp6o90Si',
+    'createdAt': '2018-08-02T14:50:48.519Z'
 }
diff --git a/test/test_hustle/test_hustle.py b/test/test_hustle/test_hustle.py
index fddef221f4..e28016d81b 100644
--- a/test/test_hustle/test_hustle.py
+++ b/test/test_hustle/test_hustle.py
@@ -2,154 +2,143 @@
 import requests_mock
 from test.utils import assert_matching_tables
 from test.test_hustle import expected_json
-from parsons import Table, Hustle
+from parsons.etl import Table
+from parsons.hustle import Hustle
 from parsons.hustle.hustle import HUSTLE_URI
 
-CLIENT_ID = "FAKE_ID"
-CLIENT_SECRET = "FAKE_SECRET"
+CLIENT_ID = 'FAKE_ID'
+CLIENT_SECRET = 'FAKE_SECRET'
 
 
 class TestHustle(unittest.TestCase):
+
     @requests_mock.Mocker()
     def setUp(self, m):
 
-        m.post(HUSTLE_URI + "oauth/token", json=expected_json.auth_token)
+        m.post(HUSTLE_URI + 'oauth/token', json=expected_json.auth_token)
         self.hustle = Hustle(CLIENT_ID, CLIENT_SECRET)
 
     @requests_mock.Mocker()
     def test_auth_token(self, m):
 
-        self.assertEqual(
-            self.hustle.auth_token, expected_json.auth_token["access_token"]
-        )
+        self.assertEqual(self.hustle.auth_token, expected_json.auth_token['access_token'])
 
     @requests_mock.Mocker()
     def test_get_organizations(self, m):
 
-        m.get(HUSTLE_URI + "organizations", json=expected_json.organizations)
+        m.get(HUSTLE_URI + 'organizations', json=expected_json.organizations)
         orgs = self.hustle.get_organizations()
-        assert_matching_tables(orgs, Table(expected_json.organizations["items"]))
+        assert_matching_tables(orgs, Table(expected_json.organizations['items']))
 
     @requests_mock.Mocker()
     def test_get_organization(self, m):
 
-        m.get(HUSTLE_URI + "organizations/LePEoKzD3", json=expected_json.organization)
-        org = self.hustle.get_organization("LePEoKzD3")
+        m.get(HUSTLE_URI + 'organizations/LePEoKzD3', json=expected_json.organization)
+        org = self.hustle.get_organization('LePEoKzD3')
         self.assertEqual(org, expected_json.organization)
 
     @requests_mock.Mocker()
     def test_get_groups(self, m):
 
-        m.get(HUSTLE_URI + "organizations/LePEoKzD3/groups", json=expected_json.groups)
-        groups = self.hustle.get_groups("LePEoKzD3")
-        assert_matching_tables(groups, Table(expected_json.groups["items"]))
+        m.get(HUSTLE_URI + 'organizations/LePEoKzD3/groups', json=expected_json.groups)
+        groups = self.hustle.get_groups('LePEoKzD3')
+        assert_matching_tables(groups, Table(expected_json.groups['items']))
 
     @requests_mock.Mocker()
     def test_get_group(self, m):
 
-        m.get(HUSTLE_URI + "groups/zajXdqtzRt", json=expected_json.group)
-        org = self.hustle.get_group("zajXdqtzRt")
+        m.get(HUSTLE_URI + 'groups/zajXdqtzRt', json=expected_json.group)
+        org = self.hustle.get_group('zajXdqtzRt')
         self.assertEqual(org, expected_json.group)
 
     @requests_mock.Mocker()
     def test_create_lead(self, m):
 
-        m.post(HUSTLE_URI + "groups/cMCH0hxwGt/leads", json=expected_json.lead)
-        lead = self.hustle.create_lead(
-            "cMCH0hxwGt", "Barack", "5126993336", last_name="Obama"
-        )
+        m.post(HUSTLE_URI + 'groups/cMCH0hxwGt/leads', json=expected_json.lead)
+        lead = self.hustle.create_lead('cMCH0hxwGt', 'Barack', '5126993336', last_name='Obama')
         self.assertEqual(lead, expected_json.lead)
 
     @requests_mock.Mocker()
     def test_create_leads(self, m):
 
-        m.post(
-            HUSTLE_URI + "groups/cMCH0hxwGt/leads",
-            [
-                {"json": expected_json.leads_tbl_01},
-                {"json": expected_json.leads_tbl_02},
-            ],
-        )
-
-        tbl = Table(
-            [
-                ["phone_number", "ln", "first_name"],
-                ["4435705355", "Johnson", "Lyndon"],
-                ["4435705354", "Richard", "Ann"],
-            ]
-        )
-        ids = self.hustle.create_leads(tbl, group_id="cMCH0hxwGt")
-        assert_matching_tables(ids, Table(expected_json.created_leads))
+        m.post(HUSTLE_URI + 'groups/cMCH0hxwGt/leads', json=expected_json.leads_tbl_01)
+
+        tbl = Table([['phone_number', 'ln', 'first_name'],
+                     ['4435705355', 'Warren', 'Elizabeth'],
+                     ['5126993336', 'Obama', 'Barack']])
+        ids = self.hustle.create_leads(tbl, group_id='cMCH0hxwGt')
+        assert_matching_tables(ids, Table(expected_json.leads['items']))
 
     @requests_mock.Mocker()
     def test_update_lead(self, m):
 
-        m.put(HUSTLE_URI + "leads/wqy78hlz2T", json=expected_json.updated_lead)
-        updated_lead = self.hustle.update_lead("wqy78hlz2T", first_name="Bob")
+        m.put(HUSTLE_URI + 'leads/wqy78hlz2T', json=expected_json.updated_lead)
+        updated_lead = self.hustle.update_lead('wqy78hlz2T', first_name='Bob')
         self.assertEqual(updated_lead, expected_json.updated_lead)
 
     @requests_mock.Mocker()
     def test_get_leads(self, m):
 
         # By Organization
-        m.get(HUSTLE_URI + "organizations/cMCH0hxwGt/leads", json=expected_json.leads)
-        leads = self.hustle.get_leads(organization_id="cMCH0hxwGt")
-        assert_matching_tables(leads, Table(expected_json.leads["items"]))
+        m.get(HUSTLE_URI + 'organizations/cMCH0hxwGt/leads', json=expected_json.leads)
+        leads = self.hustle.get_leads(organization_id='cMCH0hxwGt')
+        assert_matching_tables(leads, Table(expected_json.leads['items']))
 
         # By Group ID
-        m.get(HUSTLE_URI + "groups/cMCH0hxwGt/leads", json=expected_json.leads)
-        leads = self.hustle.get_leads(group_id="cMCH0hxwGt")
-        assert_matching_tables(leads, Table(expected_json.leads["items"]))
+        m.get(HUSTLE_URI + 'groups/cMCH0hxwGt/leads', json=expected_json.leads)
+        leads = self.hustle.get_leads(group_id='cMCH0hxwGt')
+        assert_matching_tables(leads, Table(expected_json.leads['items']))
 
     @requests_mock.Mocker()
     def test_get_lead(self, m):
 
-        m.get(HUSTLE_URI + "leads/wqy78hlz2T", json=expected_json.lead)
-        lead = self.hustle.get_lead("wqy78hlz2T")
+        m.get(HUSTLE_URI + 'leads/wqy78hlz2T', json=expected_json.lead)
+        lead = self.hustle.get_lead('wqy78hlz2T')
         self.assertEqual(lead, expected_json.lead)
 
     @requests_mock.Mocker()
     def test_get_tags(self, m):
 
-        m.get(HUSTLE_URI + "organizations/LePEoKzD3/tags", json=expected_json.tags)
-        tags = self.hustle.get_tags(organization_id="LePEoKzD3")
-        assert_matching_tables(tags, Table(expected_json.tags["items"]))
+        m.get(HUSTLE_URI + 'organizations/LePEoKzD3/tags', json=expected_json.tags)
+        tags = self.hustle.get_tags(organization_id='LePEoKzD3')
+        assert_matching_tables(tags, Table(expected_json.tags['items']))
 
     @requests_mock.Mocker()
     def test_get_tag(self, m):
 
-        m.get(HUSTLE_URI + "tags/zEx5rjbg5", json=expected_json.tag)
-        tag = self.hustle.get_tag("zEx5rjbg5")
+        m.get(HUSTLE_URI + 'tags/zEx5rjbg5', json=expected_json.tag)
+        tag = self.hustle.get_tag('zEx5rjbg5')
         self.assertEqual(tag, expected_json.tag)
 
     @requests_mock.Mocker()
     def test_get_agents(self, m):
 
-        m.get(HUSTLE_URI + "groups/Qqp6o90SiE/agents", json=expected_json.agents)
-        agents = self.hustle.get_agents(group_id="Qqp6o90SiE")
-        assert_matching_tables(agents, Table(expected_json.agents["items"]))
+        m.get(HUSTLE_URI + 'groups/Qqp6o90SiE/agents', json=expected_json.agents)
+        agents = self.hustle.get_agents(group_id='Qqp6o90SiE')
+        assert_matching_tables(agents, Table(expected_json.agents['items']))
 
     @requests_mock.Mocker()
     def test_get_agent(self, m):
 
-        m.get(HUSTLE_URI + "agents/CrJUBI1CF", json=expected_json.agent)
-        agent = self.hustle.get_agent("CrJUBI1CF")
+        m.get(HUSTLE_URI + 'agents/CrJUBI1CF', json=expected_json.agent)
+        agent = self.hustle.get_agent('CrJUBI1CF')
         self.assertEqual(agent, expected_json.agent)
 
     @requests_mock.Mocker()
     def test_create_agent(self, m):
 
-        m.post(HUSTLE_URI + "groups/Qqp6o90Si/agents", json=expected_json.agent)
-        new_agent = self.hustle.create_agent(
-            "Qqp6o90Si", name="Angela", full_name="Jones", phone_number="12032498764"
-        )
+        m.post(HUSTLE_URI + 'groups/Qqp6o90Si/agents', json=expected_json.agent)
+        new_agent = self.hustle.create_agent('Qqp6o90Si',
+                                             name='Angela',
+                                             full_name='Jones',
+                                             phone_number='12032498764')
         self.assertEqual(new_agent, expected_json.agent)
 
     @requests_mock.Mocker()
     def test_update_agent(self, m):
 
-        m.put(HUSTLE_URI + "agents/CrJUBI1CF", json=expected_json.agent)
-        updated_agent = self.hustle.update_agent(
-            "CrJUBI1CF", name="Angela", full_name="Jones"
-        )
+        m.put(HUSTLE_URI + 'agents/CrJUBI1CF', json=expected_json.agent)
+        updated_agent = self.hustle.update_agent('CrJUBI1CF',
+                                                 name='Angela',
+                                                 full_name='Jones')
         self.assertEqual(updated_agent, expected_json.agent)
diff --git a/test/test_mailchimp/expected_json.py b/test/test_mailchimp/expected_json.py
index 69e8abe1e1..966c8d96ce 100644
--- a/test/test_mailchimp/expected_json.py
+++ b/test/test_mailchimp/expected_json.py
@@ -18,7 +18,7 @@
             "list_is_active": True,
             "list_name": "Support Our Candidate List 1",
             "segment_text": "",
-            "recipient_count": 145,
+            "recipient_count": 145
         },
         "settings": {
             "subject_line": "Sample Campaign 1",
@@ -36,7 +36,7 @@
             "fb_comments": True,
             "timewarp": False,
             "template_id": 12345,
-            "drag_and_drop": True,
+            "drag_and_drop": True
         },
         "tracking": {
             "opens": True,
@@ -45,7 +45,7 @@
             "goal_tracking": False,
             "ecomm360": False,
             "google_analytics": "",
-            "clicktale": "",
+            "clicktale": ""
         },
         "report_summary": {
             "opens": 48,
@@ -54,10 +54,16 @@
             "clicks": 1,
             "subscriber_clicks": 1,
             "click_rate": 0.006993006993006993,
-            "ecommerce": {"total_orders": 0, "total_spent": 0, "total_revenue": 0},
+            "ecommerce": {
+                "total_orders": 0,
+                "total_spent": 0,
+                "total_revenue": 0
+            }
         },
-        "delivery_status": {"enabled": False},
-        "_links": [],
+        "delivery_status": {
+            "enabled": False
+        },
+        "_links": []
     },
     {
         "id": "def",
@@ -77,7 +83,7 @@
             "list_is_active": True,
             "list_name": "Support Our Candidate List 2",
             "segment_text": "",
-            "recipient_count": 87,
+            "recipient_count": 87
         },
         "settings": {
             "subject_line": "Sample Campaign 2",
@@ -95,7 +101,7 @@
             "fb_comments": True,
             "timewarp": False,
             "template_id": 67890,
-            "drag_and_drop": True,
+            "drag_and_drop": True
         },
         "tracking": {
             "opens": True,
@@ -104,7 +110,7 @@
             "goal_tracking": False,
             "ecomm360": False,
             "google_analytics": "",
-            "clicktale": "",
+            "clicktale": ""
         },
         "report_summary": {
             "opens": 108,
@@ -113,13 +119,17 @@
             "clicks": 25,
             "subscriber_clicks": 14,
             "click_rate": 0.16470588235294117,
-            "ecommerce": {"total_orders": 0, "total_spent": 0, "total_revenue": 0},
+            "ecommerce": {
+                "total_orders": 0,
+                "total_spent": 0,
+                "total_revenue": 0
+            }
         },
-        "delivery_status": {"enabled": False},
-        "_links": [],
-    },
-    ]
-}
+        "delivery_status": {
+            "enabled": False
+        },
+        "_links": []
+    }]}
 
 test_lists = {
     "lists": [
@@ -135,18 +145,17 @@
                 "state": "OH",
                 "zip": "43358",
                 "country": "US",
-                "phone": "",
+                "phone": ""
             },
             "permission_reminder": (
                 "You are receiving this email because you signed up at an event, while being "
-                "canvassed, or on our website."
-            ),
+                "canvassed, or on our website."),
             "use_archive_bar": True,
             "campaign_defaults": {
                 "from_name": "Our Candidate",
                 "from_email": "our_candidate@example.com",
                 "subject": "",
-                "language": "en",
+                "language": "en"
             },
             "notify_on_subscribe": "",
             "notify_on_unsubscribe": "",
@@ -177,9 +186,9 @@
                 "open_rate": 38.40236686390532,
                 "click_rate": 4.016786570743405,
                 "last_sub_date": "2019-09-24T01:07:56+00:00",
-                "last_unsub_date": "2020-01-06T01:55:02+00:00",
+                "last_unsub_date": "2020-01-06T01:55:02+00:00"
             },
-            "_links": [],
+            "_links": []
         },
         {
            "id": "xvu",
@@ -193,18 +202,17 @@
                 "state": "OH",
                 "zip": "43358",
                 "country": "US",
-                "phone": "",
+                "phone": ""
             },
             "permission_reminder": (
                 "You are receiving this email because you signed up at an event, while being "
-                "canvassed, or on our website."
-            ),
+                "canvassed, or on our website."),
             "use_archive_bar": True,
            "campaign_defaults": {
                 "from_name": "Our Candidate",
                 "from_email": "our_candidate@example.com",
                 "subject": "",
-                "language": "en",
+                "language": "en"
             },
             "notify_on_subscribe": "",
             "notify_on_unsubscribe": "",
@@ -235,133 +243,143 @@
                 "open_rate": 64.19236186394533,
                 "click_rate": 3.746759370417411,
                 "last_sub_date": "2020-01-01T00:19:46+00:00",
-                "last_unsub_date": "2019-12-23T11:44:31+00:00",
+                "last_unsub_date": "2019-12-23T11:44:31+00:00"
             },
-            "_links": [],
+            "_links": []
         },
     ],
     "total_items": 1,
     "constraints": {
         "may_create": False,
         "max_instances": 1,
-        "current_total_instances": 1,
+        "current_total_instances": 1
    },
-    "_links": [],
+    "_links": []
 }
 
 test_members = {
-    "members": [
-        {
-            "id": "9eb69db8d0371811aa18803a1ae21584",
-            "email_address": "member_1@example.com",
-            "unique_email_id": "c82a25d939",
-            "web_id": 24816326,
-            "email_type": "html",
-            "status": "subscribed",
-            "merge_fields": {
-                "FNAME": "Member",
-                "LNAME": "One",
-                "ADDRESS": {
-                    "addr1": "",
-                    "addr2": "",
-                    "city": "",
-                    "state": "",
-                    "zip": "",
-                    "country": "US",
-                },
-                "PHONE": "",
-                "BIRTHDAY": "",
-            },
-            "stats": {"avg_open_rate": 0.3571, "avg_click_rate": 0},
-            "ip_signup": "",
-            "timestamp_signup": "",
-            "ip_opt": "174.59.50.35",
-            "timestamp_opt": "2019-03-25T22:55:44+00:00",
-            "member_rating": 4,
-            "last_changed": "2019-03-25T22:55:44+00:00",
-            "language": "en",
-            "vip": False,
-            "email_client": "Gmail",
-            "location": {
-                "latitude": 40.0293,
-                "longitude": -76.2656,
-                "gmtoff": 0,
-                "dstoff": 0,
-                "country_code": "US",
-                "timezone": "717/223",
-            },
-            "source": "Unknown",
-            "tags_count": 0,
-            "tags": [],
-            "list_id": "67fdf4b1f4",
-            "_links": [],
-        },
+    "members": [
+        {
+            "id": "9eb69db8d0371811aa18803a1ae21584",
+            "email_address": "member_1@example.com",
+            "unique_email_id": "c82a25d939",
+            "web_id": 24816326,
+            "email_type": "html",
+            "status": "subscribed",
+            "merge_fields": {
+                "FNAME": "Member",
+                "LNAME": "One",
+                "ADDRESS": {
+                    "addr1": "",
+                    "addr2": "",
+                    "city": "",
+                    "state": "",
+                    "zip": "",
+                    "country": "US"
                },
+                "PHONE": "",
+                "BIRTHDAY": ""
+            },
+            "stats": {
+                "avg_open_rate": 0.3571,
+                "avg_click_rate": 0
+            },
+            "ip_signup": "",
+            "timestamp_signup": "",
+            "ip_opt": "174.59.50.35",
+            "timestamp_opt": "2019-03-25T22:55:44+00:00",
+            "member_rating": 4,
+            "last_changed": "2019-03-25T22:55:44+00:00",
+            "language": "en",
+            "vip": False,
+            "email_client": "Gmail",
+            "location": {
+                "latitude": 40.0293,
+                "longitude": -76.2656,
+                "gmtoff": 0,
+                "dstoff": 0,
+                "country_code": "US",
+                "timezone": "717/223"
+            },
+            "source": "Unknown",
+            "tags_count": 0,
+            "tags": [],
+            "list_id": "67fdf4b1f4",
+            "_links": []
+        },
+        {
+            "id": "4f315641dbad7b74acc0f4a5d3741ac6",
+
"email_address": "member_2@example.com", + "unique_email_id": "8d308d69d3", + "web_id": 12233445, + "email_type": "html", + "status": "subscribed", + "merge_fields": { + "FNAME": "Member", + "LNAME": "Two", + "ADDRESS": "", + "PHONE": "", + "BIRTHDAY": "" + }, + "stats": { + "avg_open_rate": 0.5, + "avg_click_rate": 0 + }, + "ip_signup": "", + "timestamp_signup": "", + "ip_opt": "174.59.50.35", + "timestamp_opt": "2019-03-25T23:04:46+00:00", + "member_rating": 4, + "last_changed": "2019-03-25T23:04:46+00:00", + "language": "", + "vip": False, + "email_client": "iPhone", + "location": { + "latitude": 40.0459, + "longitude": -76.3542, + "gmtoff": 0, + "dstoff": 0, + "country_code": "US", + "timezone": "717/223" + }, + "source": "Import", + "tags_count": 2, + "tags": [ { - "id": "4f315641dbad7b74acc0f4a5d3741ac6", - "email_address": "member_2@example.com", - "unique_email_id": "8d308d69d3", - "web_id": 12233445, - "email_type": "html", - "status": "subscribed", - "merge_fields": { - "FNAME": "Member", - "LNAME": "Two", - "ADDRESS": "", - "PHONE": "", - "BIRTHDAY": "", - }, - "stats": {"avg_open_rate": 0.5, "avg_click_rate": 0}, - "ip_signup": "", - "timestamp_signup": "", - "ip_opt": "174.59.50.35", - "timestamp_opt": "2019-03-25T23:04:46+00:00", - "member_rating": 4, - "last_changed": "2019-03-25T23:04:46+00:00", - "language": "", - "vip": False, - "email_client": "iPhone", - "location": { - "latitude": 40.0459, - "longitude": -76.3542, - "gmtoff": 0, - "dstoff": 0, - "country_code": "US", - "timezone": "717/223", - }, - "source": "Import", - "tags_count": 2, - "tags": [ - {"id": 17493, "name": "canvass"}, - {"id": 17497, "name": "canvass-03-17-2019"}, - ], - "list_id": "67fdf4b1f4", - "_links": [], + "id": 17493, + "name": "canvass" }, - ] -} - -test_unsubscribes = { - "unsubscribes": [ { - "email_id": "e542e5cd7b414e5ff8409ff57cf154be", - "email_address": "unsubscribe_1@exmaple.com", - "merge_fields": { - "FNAME": "Unsubscriber", - "LNAME": "One", - "ADDRESS": "", - "PHONE": "5558754307", - "BIRTHDAY": "", - }, - "vip": False, - "timestamp": "2019-12-09T21:18:06+00:00", - "reason": "None given", - "campaign_id": "abc", - "list_id": "zyx", - "list_is_active": True, - "_links": [], + "id": 17497, + "name": "canvass-03-17-2019" } - ], - "campaign_id": "abc", - "total_items": 1, - "_links": [], + ], + "list_id": "67fdf4b1f4", + "_links": [] + }]} + +test_unsubscribes = { + "unsubscribes": [ + { + "email_id": "e542e5cd7b414e5ff8409ff57cf154be", + "email_address": "unsubscribe_1@exmaple.com", + "merge_fields": { + "FNAME": "Unsubscriber", + "LNAME": "One", + "ADDRESS": "", + "PHONE": "5558754307", + "BIRTHDAY": "" + }, + "vip": False, + "timestamp": "2019-12-09T21:18:06+00:00", + "reason": "None given", + "campaign_id": "abc", + "list_id": "zyx", + "list_is_active": True, + "_links": [] + } + ], + "campaign_id": "abc", + "total_items": 1, + "_links": [] } diff --git a/test/test_mailchimp/test_mailchimp.py b/test/test_mailchimp/test_mailchimp.py index 2569af3de7..014eb4d234 100644 --- a/test/test_mailchimp/test_mailchimp.py +++ b/test/test_mailchimp/test_mailchimp.py @@ -1,12 +1,13 @@ -from parsons import Mailchimp +from parsons.mailchimp.mailchimp import Mailchimp import unittest import requests_mock from test.test_mailchimp import expected_json -API_KEY = "mykey-us00" +API_KEY = 'mykey-us00' class TestMailchimp(unittest.TestCase): + def setUp(self): self.mc = Mailchimp(API_KEY) @@ -15,7 +16,7 @@ def setUp(self): def test_get_campaigns(self, m): # Test that campaigns are returned correctly. 
- m.get(self.mc.uri + "campaigns", json=expected_json.test_campaigns) + m.get(self.mc.uri + 'campaigns', json=expected_json.test_campaigns) tbl = self.mc.get_campaigns() self.assertEqual(tbl.num_rows, 2) @@ -24,7 +25,7 @@ def test_get_campaigns(self, m): def test_get_lists(self, m): # Test that lists are returned correctly. - m.get(self.mc.uri + "lists", json=expected_json.test_lists) + m.get(self.mc.uri + 'lists', json=expected_json.test_lists) tbl = self.mc.get_lists() self.assertEqual(tbl.num_rows, 2) @@ -33,8 +34,8 @@ def test_get_lists(self, m): def test_get_members(self, m): # Test that list members are returned correctly. - m.get(self.mc.uri + "lists/zyx/members", json=expected_json.test_members) - tbl = self.mc.get_members(list_id="zyx") + m.get(self.mc.uri + 'lists/zyx/members', json=expected_json.test_members) + tbl = self.mc.get_members(list_id='zyx') self.assertEqual(tbl.num_rows, 2) @@ -42,10 +43,7 @@ def test_get_members(self, m): def test_get_unsubscribes(self, m): # Test that campaign unsubscribes are returned correctly. - m.get( - self.mc.uri + "reports/abc/unsubscribed", - json=expected_json.test_unsubscribes, - ) - tbl = self.mc.get_unsubscribes(campaign_id="abc") + m.get(self.mc.uri + 'reports/abc/unsubscribed', json=expected_json.test_unsubscribes) + tbl = self.mc.get_unsubscribes(campaign_id='abc') self.assertEqual(tbl.num_rows, 1) diff --git a/test/test_mobilize/__init__.py b/test/test_mobilize/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_mobilize/test_mobilize_america.py b/test/test_mobilize/test_mobilize_america.py deleted file mode 100644 index 3301c07f31..0000000000 --- a/test/test_mobilize/test_mobilize_america.py +++ /dev/null @@ -1,139 +0,0 @@ -import unittest -import requests_mock -from parsons import MobilizeAmerica -from test.utils import validate_list -import test.test_mobilize.test_mobilize_json as test_json - - -class TestMobilizeAmerica(unittest.TestCase): - def setUp(self): - - self.ma = MobilizeAmerica() - - def tearDown(self): - - pass - - def test_time_parse(self): - - # Test that Unix conversion works correctly - self.assertEqual(self.ma._time_parse("<=2018-12-13"), "lte_1544659200") - - # Test that it throws an error when you put in an invalid filter - self.assertRaises(ValueError, self.ma._time_parse, "=2018-12-01") - - @requests_mock.Mocker() - def test_get_organizations(self, m): - - m.get(self.ma.uri + "organizations", json=test_json.GET_ORGANIZATIONS_JSON) - - expected = [ - "id", - "name", - "slug", - "is_coordinated", - "is_independent", - "is_primary_campaign", - "state", - "district", - "candidate_name", - "race_type", - "event_feed_url", - "created_date", - "modified_date", - ] - - # Assert response is expected structure - self.assertTrue(validate_list(expected, self.ma.get_organizations())) - - @requests_mock.Mocker() - def test_get_events(self, m): - - m.get(self.ma.uri + "events", json=test_json.GET_EVENTS_JSON) - - expected = [ - "id", - "description", - "timezone", - "title", - "summary", - "featured_image_url", - "event_type", - "created_date", - "modified_date", - "browser_url", - "high_priority", - "contact", - "visibility", - "sponsor_candidate_name", - "sponsor_created_date", - "sponsor_district", - "sponsor_event_feed_url", - "sponsor_id", - "sponsor_is_coordinated", - "sponsor_is_independent", - "sponsor_is_primary_campaign", - "sponsor_modified_date", - "sponsor_name", - "sponsor_race_type", - "sponsor_slug", - "sponsor_state", - "address_lines", - "congressional_district", - 
"locality", - "postal_code", - "region", - "state_leg_district", - "state_senate_district", - "venue", - "latitude", - "longitude", - "timeslots_0_end_date", - "timeslots_0_id", - "timeslots_0_start_date", - ] - - # Assert response is expected structure - self.assertTrue(validate_list(expected, self.ma.get_events())) - - @requests_mock.Mocker() - def test__get_events_organization__can_exclude_timeslots(self, m): - - m.get(requests_mock.ANY, json=test_json.GET_EVENTS_ORGANIZATION_JSON) - - ma = MobilizeAmerica(api_key="test_password") - data = ma.get_events_organization(1, max_timeslots=0) - - self.assertNotIn("timeslots_0_id", data.columns) - - @requests_mock.Mocker() - def test__get_events_organization__can_get_all_timeslots(self, m): - - m.get(requests_mock.ANY, json=test_json.GET_EVENTS_ORGANIZATION_JSON) - - ma = MobilizeAmerica(api_key="test_password") - data = ma.get_events_organization(1) - - self.assertIn("timeslots_0_id", data.columns) - self.assertIn("timeslots_1_id", data.columns) - - @requests_mock.Mocker() - def test__get_events_organization__can_limit_timeslots(self, m): - - m.get(requests_mock.ANY, json=test_json.GET_EVENTS_ORGANIZATION_JSON) - - ma = MobilizeAmerica(api_key="test_password") - data = ma.get_events_organization(1, max_timeslots=1) - - self.assertIn("timeslots_0_id", data.columns) - self.assertNotIn("timeslots_1_id", data.columns) - - @requests_mock.Mocker() - def test_get_events_deleted(self, m): - - m.get(self.ma.uri + "events/deleted", json=test_json.GET_EVENTS_DELETED_JSON) - - # Assert response is expected structure - self.assertTrue( - validate_list(["id", "deleted_date"], self.ma.get_events_deleted()) - ) diff --git a/test/test_mobilize/test_mobilize_json.py b/test/test_mobilize/test_mobilize_json.py deleted file mode 100644 index 6f7ea324d9..0000000000 --- a/test/test_mobilize/test_mobilize_json.py +++ /dev/null @@ -1,261 +0,0 @@ -import json - -GET_ORGANIZATIONS_JSON = { - "count": 38, - "next": None, - "previous": ( - "https://events.mobilizeamerica.io/api/v1/organizations?updated_since=1543644000" - ), - "data": [ - { - "id": 1251, - "name": "Mike Blake for New York City", - "slug": "mikefornyc", - "is_coordinated": "True", - "is_independent": "True", - "is_primary_campaign": "False", - "state": "", - "district": "", - "candidate_name": "", - "race_type": "OTHER_LOCAL", - "event_feed_url": "https://events.mobilizeamerica.io/mikefornyc/", - "created_date": 1545885434, - "modified_date": 1546132256, - } - ], -} - -GET_EVENTS_JSON = { - "count": 1, - "next": None, - "previous": None, - "data": [ - { - "id": 86738, - "description": ( - "Join our team of volunteers and learn how to engage students in local " - "high schools, communicate our mission, and register young voters." 
- ), - "timezone": "America/Chicago", - "title": "Student Voter Initiative Training", - "summary": "", - "featured_image_url": ( - "https://mobilizeamerica.imgix.net/uploads/event/" - "40667432145_6188839fe3_o_20190102224312253645.jpeg" - ), - "sponsor": { - "id": 1076, - "name": "Battleground Texas", - "slug": "battlegroundtexas", - "is_coordinated": True, - "is_independent": False, - "is_primary_campaign": False, - "state": "", - "district": "", - "candidate_name": "", - "race_type": None, - "event_feed_url": "https://events.mobilizeamerica.io/battlegroundtexas/", - "created_date": 1538590930, - "modified_date": 1546468308, - }, - "timeslots": [ - {"id": 526226, "start_date": 1547330400, "end_date": 1547335800} - ], - "location": { - "venue": "Harris County Democratic Party HQ", - "address_lines": ["4619 Lyons Ave", ""], - "locality": "Houston", - "region": "TX", - "postal_code": "77020", - "location": {"latitude": 29.776446, "longitude": -95.323037}, - "congressional_district": "18", - "state_leg_district": "142", - "state_senate_district": None, - }, - "event_type": "TRAINING", - "created_date": 1546469706, - "modified_date": 1547335800, - "browser_url": ( - "https://events.mobilizeamerica.io/battlegroundtexas/event/86738/" - ), - "high_priority": None, - "contact": None, - "visibility": "PUBLIC", - } - ], -} - -GET_EVENTS_ORGANIZATION_JSON = json.loads( - """ - { - "count": 2, - "next": null, - "previous": null, - "data": [ - { - "approval_status": "APPROVED", - "address_visibility": "PUBLIC", - "location": { - "venue": "Test", - "address_lines": [ - "123 Test Road", - "" - ], - "locality": "York", - "region": "PA", - "country": "US", - "postal_code": "17404", - "location": { - "latitude": 40.0588876, - "longitude": -76.7835604 - }, - "congressional_district": null, - "state_leg_district": null, - "state_senate_district": null - }, - "timeslots": [ - { - "id": 1, - "start_date": 2, - "end_date": 3, - "instructions": "" - }, - { - "id": 2, - "start_date": 3, - "end_date": 4, - "instructions": "Some detailed instructions for the second timeslot" - } - ], - "title": "Test Event", - "accessibility_status": null, - "created_date": 1532629574, - "created_by_volunteer_host": false, - "instructions": "", - "virtual_action_url": null, - "summary": "", - "sponsor": { - "id": 1, - "is_primary_campaign": false, - "name": "Test Org", - "is_independent": true, - "candidate_name": "", - "org_type": "OTHER", - "created_date": 1513974036, - "event_feed_url": "https://www.mobilize.us/test_org/", - "state": "", - "race_type": null, - "logo_url": "https://mobilize-uploads-prod.s3.amazonaws.com/branding/test_logo.png", - "is_coordinated": false, - "district": "", - "slug": "testorg", - "modified_date": 1655222024 - }, - "featured_image_url": "", - "contact": { - "name": "Test", - "email_address": "tester@test.org", - "phone_number": "", - "owner_user_id": 1234 - }, - "timezone": "America/New_York", - "id": 7659, - "description": "Test", - "event_campaign": null, - "high_priority": false, - "accessibility_notes": null, - "event_type": "PHONE_BANK", - "browser_url": "https://www.mobilize.us/test_org/event/1/", - "visibility": "PUBLIC", - "is_virtual": false, - "modified_date": 1601663981, - "tags": [] - }, - { - "approval_status": "APPROVED", - "address_visibility": "PRIVATE", - "location": { - "locality": "Schenectady", - "region": "NY", - "country": "US", - "postal_code": "12309", - "congressional_district": "20", - "state_leg_district": "110", - "state_senate_district": "49", - "address_lines": [ - "This 
event address is private. Sign up for more details", - "" - ], - "venue": "This event address is private. Sign up for more details" - }, - "timeslots": [ - { - "id": 1, - "start_date": 2, - "end_date": 3, - "instructions": "" - }, - { - "id": 2, - "start_date": 3, - "end_date": 4, - "instructions": "Some detailed instructions for the second timeslot" - } - ], - "title": "Test Phonebank", - "accessibility_status": null, - "created_date": 1537289907, - "created_by_volunteer_host": false, - "instructions": null, - "virtual_action_url": null, - "summary": "Help Us Call Testers!", - "sponsor": { - "id": 321, - "is_primary_campaign": false, - "name": "Test Org Two", - "is_independent": false, - "candidate_name": "", - "org_type": "TEST_ORG", - "created_date": 1537214527, - "event_feed_url": "https://www.mobilize.us/test_org_two/", - "state": "NY", - "race_type": null, - "logo_url": "https://amazonaws.com/test_org_two.jpg", - "is_coordinated": true, - "district": "", - "slug": "testorgtwo", - "modified_date": 1654183362 - }, - "featured_image_url": "https://mobilizeamerica.imgix.net/uploads/test.jpg", - "contact": null, - "timezone": "America/New_York", - "id": 421, - "description": "Join us to call people and do an automated test!", - "event_campaign": null, - "high_priority": false, - "accessibility_notes": null, - "event_type": "PHONE_BANK", - "browser_url": "https://www.mobilize.us/test_org_two/event/2/", - "visibility": "PUBLIC", - "is_virtual": false, - "modified_date": 1601665649, - "tags": [] - } - ], - "metadata": { - "url_name": "public_organization_events", - "build_commit": "abcd", - "page_title": null - } - }""" -) - -GET_EVENTS_DELETED_JSON = { - "count": 2, - "next": None, - "previous": None, - "data": [ - {"id": 86765, "deleted_date": 1546705971}, - {"id": 86782, "deleted_date": 1546912779}, - ], -} diff --git a/test/test_mobilize_america.py b/test/test_mobilize_america.py new file mode 100644 index 0000000000..62c069cb59 --- /dev/null +++ b/test/test_mobilize_america.py @@ -0,0 +1,153 @@ +import unittest +import requests_mock +from parsons.mobilize_america import MobilizeAmerica +from test.utils import validate_list + + +class TestMobilizeAmerica(unittest.TestCase): + + def setUp(self): + + self.ma = MobilizeAmerica() + + def tearDown(self): + + pass + + def test_time_parse(self): + + # Test that Unix conversion works correctly + self.assertEqual(self.ma._time_parse('<=2018-12-13'), 'lte_1544659200') + + # Test that it throws an error when you put in an invalid filter + self.assertRaises(ValueError, self.ma._time_parse, '=2018-12-01') + + @requests_mock.Mocker() + def test_get_organizations(self, m): + + json = { + "count": 38, + "next": None, + "previous": ( + "https://events.mobilizeamerica.io/api/v1/organizations?updated_since=1543644000"), + "data": [ + { + "id": 1251, + "name": "Mike Blake for New York City", + "slug": "mikefornyc", + "is_coordinated": 'True', + "is_independent": 'True', + "is_primary_campaign": 'False', + "state": "", + "district": "", + "candidate_name": "", + "race_type": "OTHER_LOCAL", + "event_feed_url": "https://events.mobilizeamerica.io/mikefornyc/", + "created_date": 1545885434, + "modified_date": 1546132256 + } + ] + } + + m.get(self.ma.uri + 'organizations', json=json) + + expected = [ + 'id', 'name', 'slug', 'is_coordinated', 'is_independent', 'is_primary_campaign', + 'state', 'district', 'candidate_name', 'race_type', 'event_feed_url', 'created_date', + 'modified_date'] + + # Assert response is expected structure + 
self.assertTrue(validate_list(expected, self.ma.get_organizations())) + + @requests_mock.Mocker() + def test_get_events(self, m): + + json = { + 'count': 1, 'next': None, 'previous': None, + 'data': [ + { + 'id': 86738, + 'description': ( + 'Join our team of volunteers and learn how to engage students in local ' + 'high schools, communicate our mission, and register young voters.'), + 'timezone': 'America/Chicago', + 'title': 'Student Voter Initiative Training', + 'summary': '', + 'featured_image_url': ( + 'https://mobilizeamerica.imgix.net/uploads/event/' + '40667432145_6188839fe3_o_20190102224312253645.jpeg'), + 'sponsor': { + 'id': 1076, + 'name': 'Battleground Texas', + 'slug': 'battlegroundtexas', + 'is_coordinated': True, + 'is_independent': False, + 'is_primary_campaign': False, + 'state': '', + 'district': '', + 'candidate_name': '', + 'race_type': None, + 'event_feed_url': 'https://events.mobilizeamerica.io/battlegroundtexas/', + 'created_date': 1538590930, + 'modified_date': 1546468308 + }, + 'timeslots': [{ + 'id': 526226, + 'start_date': 1547330400, + 'end_date': 1547335800}], + 'location': { + 'venue': 'Harris County Democratic Party HQ', + 'address_lines': ['4619 Lyons Ave', ''], + 'locality': 'Houston', + 'region': 'TX', + 'postal_code': '77020', + 'location': {'latitude': 29.776446, 'longitude': -95.323037}, + 'congressional_district': '18', + 'state_leg_district': '142', + 'state_senate_district': None + }, + 'event_type': 'TRAINING', + 'created_date': 1546469706, + 'modified_date': 1547335800, + 'browser_url': ( + 'https://events.mobilizeamerica.io/battlegroundtexas/event/86738/'), + 'high_priority': None, + 'contact': None, + 'visibility': 'PUBLIC' + } + ] + } + m.get(self.ma.uri + 'events', json=json) + + expected = [ + 'id', 'description', 'timezone', 'title', 'summary', 'featured_image_url', + 'event_type', 'created_date', 'modified_date', 'browser_url', 'high_priority', + 'contact', 'visibility', 'sponsor_candidate_name', 'sponsor_created_date', + 'sponsor_district', 'sponsor_event_feed_url', 'sponsor_id', 'sponsor_is_coordinated', + 'sponsor_is_independent', 'sponsor_is_primary_campaign', 'sponsor_modified_date', + 'sponsor_name', 'sponsor_race_type', 'sponsor_slug', 'sponsor_state', 'address_lines', + 'congressional_district', 'locality', 'postal_code', 'region', 'state_leg_district', + 'state_senate_district', 'venue', 'latitude', 'longitude', 'timeslots_0_end_date', + 'timeslots_0_id', 'timeslots_0_start_date' + ] + + # Assert response is expected structure + self.assertTrue(validate_list(expected, self.ma.get_events())) + + @requests_mock.Mocker() + def test_get_events_deleted(self, m): + + json = {'count': 2, + 'next': None, + 'previous': None, + 'data': [{'id': 86765, + 'deleted_date': 1546705971}, + {'id': 86782, + 'deleted_date': 1546912779} + ] + } + + m.get(self.ma.uri + 'events/deleted', json=json) + + # Assert response is expected structure + self.assertTrue(validate_list(['id', 'deleted_date'], self.ma.get_events_deleted())) diff --git a/test/test_nation_builder/__init__.py b/test/test_nation_builder/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/test_nation_builder/fixtures.py b/test/test_nation_builder/fixtures.py deleted file mode 100644 index fb5a0eaee6..0000000000 --- a/test/test_nation_builder/fixtures.py +++ /dev/null @@ -1,350 +0,0 @@ -GET_PEOPLE_RESPONSE = { - "results": [ - { - "birthdate": None, - "city_district": None, - "civicrm_id": None, - "county_district": None, - "county_file_id": None, - 
"created_at": "2023-06-22T11:41:56-07:00", - "datatrust_id": None, - "do_not_call": False, - "do_not_contact": False, - "dw_id": None, - "email": "foo@example.com", - "email_opt_in": True, - "employer": None, - "external_id": None, - "federal_district": None, - "fire_district": None, - "first_name": "Foo", - "has_facebook": False, - "id": 4, - "is_twitter_follower": False, - "is_volunteer": False, - "judicial_district": None, - "labour_region": None, - "last_name": "Bar", - "linkedin_id": None, - "mobile": None, - "mobile_opt_in": None, - "middle_name": "", - "nbec_guid": None, - "ngp_id": None, - "note": None, - "occupation": None, - "party": None, - "pf_strat_id": None, - "phone": None, - "precinct_id": None, - "primary_address": None, - "profile_image_url_ssl": "https://example.com/assets/notifier/profile-avatar.png", - "recruiter_id": None, - "rnc_id": None, - "rnc_regid": None, - "salesforce_id": None, - "school_district": None, - "school_sub_district": None, - "sex": None, - "signup_type": 0, - "state_file_id": None, - "state_lower_district": None, - "state_upper_district": None, - "support_level": None, - "supranational_district": None, - "tags": ["zoot", "boot"], - "twitter_id": None, - "twitter_name": None, - "updated_at": "2023-06-22T11:41:56-07:00", - "van_id": None, - "village_district": None, - "ward": None, - "work_phone_number": None, - }, - { - "birthdate": None, - "city_district": None, - "civicrm_id": None, - "county_district": None, - "county_file_id": None, - "created_at": "2023-06-22T08:21:00-07:00", - "datatrust_id": None, - "do_not_call": False, - "do_not_contact": False, - "dw_id": None, - "email": "bar@example.com", - "email_opt_in": True, - "employer": None, - "external_id": None, - "federal_district": None, - "fire_district": None, - "first_name": "Zoo", - "has_facebook": False, - "id": 2, - "is_twitter_follower": False, - "is_volunteer": False, - "judicial_district": None, - "labour_region": None, - "last_name": "Baz", - "linkedin_id": None, - "mobile": None, - "mobile_opt_in": True, - "middle_name": "", - "nbec_guid": None, - "ngp_id": None, - "note": None, - "occupation": None, - "party": None, - "pf_strat_id": None, - "phone": None, - "precinct_id": None, - "primary_address": None, - "profile_image_url_ssl": "https://example.com/assets/notifier/profile-avatar.png", - "recruiter_id": None, - "rnc_id": None, - "rnc_regid": None, - "salesforce_id": None, - "school_district": None, - "school_sub_district": None, - "sex": None, - "signup_type": 0, - "state_file_id": None, - "state_lower_district": None, - "state_upper_district": None, - "support_level": None, - "supranational_district": None, - "tags": ["zoo", "bar"], - "twitter_id": None, - "twitter_name": None, - "updated_at": "2023-06-22T11:43:03-07:00", - "van_id": None, - "village_district": None, - "ward": None, - "work_phone_number": None, - }, - ], - "next": None, - "prev": None, -} - -PERSON_RESPONSE = { - "person": { - "birthdate": None, - "city_district": None, - "civicrm_id": None, - "county_district": None, - "county_file_id": None, - "created_at": "2023-06-22T08:21:00-07:00", - "datatrust_id": None, - "do_not_call": False, - "do_not_contact": False, - "dw_id": None, - "email": "foo@example.com", - "email_opt_in": True, - "employer": None, - "external_id": None, - "federal_district": None, - "fire_district": None, - "first_name": "Foo", - "has_facebook": False, - "id": 1, - "is_twitter_follower": False, - "is_volunteer": False, - "judicial_district": None, - "labour_region": None, - "last_name": 
"Bar", - "linkedin_id": None, - "mobile": None, - "mobile_opt_in": True, - "middle_name": "", - "nbec_guid": None, - "ngp_id": None, - "note": None, - "occupation": None, - "party": None, - "pf_strat_id": None, - "phone": None, - "precinct_id": None, - "primary_address": None, - "profile_image_url_ssl": "https://example.com/assets/notifier/profile-avatar.png", - "recruiter_id": None, - "rnc_id": None, - "rnc_regid": None, - "salesforce_id": None, - "school_district": None, - "school_sub_district": None, - "sex": None, - "signup_type": 0, - "state_file_id": None, - "state_lower_district": None, - "state_upper_district": None, - "support_level": None, - "supranational_district": None, - "tags": [], - "twitter_id": None, - "twitter_name": None, - "updated_at": "2023-06-22T08:21:00-07:00", - "van_id": None, - "village_district": None, - "ward": None, - "work_phone_number": None, - "active_customer_expires_at": None, - "active_customer_started_at": None, - "author": None, - "author_id": None, - "auto_import_id": None, - "availability": None, - "ballots": [], - "banned_at": None, - "billing_address": None, - "bio": None, - "call_status_id": None, - "call_status_name": None, - "capital_amount_in_cents": 500, - "children_count": 0, - "church": None, - "city_sub_district": None, - "closed_invoices_amount_in_cents": None, - "closed_invoices_count": None, - "contact_status_id": None, - "contact_status_name": None, - "could_vote_status": False, - "demo": None, - "donations_amount_in_cents": 0, - "donations_amount_this_cycle_in_cents": 0, - "donations_count": 0, - "donations_count_this_cycle": 0, - "donations_pledged_amount_in_cents": 0, - "donations_raised_amount_in_cents": 0, - "donations_raised_amount_this_cycle_in_cents": 0, - "donations_raised_count": 0, - "donations_raised_count_this_cycle": 0, - "donations_to_raise_amount_in_cents": 0, - "email1": "foo@example.com", - "email1_is_bad": False, - "email2": None, - "email2_is_bad": False, - "email3": None, - "email3_is_bad": False, - "email4": None, - "email4_is_bad": False, - "emails": [ - { - "email_address": "foo@example.com", - "email_number": 1, - "is_bad": False, - "is_primary": True, - } - ], - "ethnicity": None, - "facebook_address": None, - "facebook_profile_url": None, - "facebook_updated_at": None, - "facebook_username": None, - "fax_number": None, - "federal_donotcall": False, - "first_donated_at": None, - "first_fundraised_at": None, - "first_invoice_at": None, - "first_prospect_at": None, - "first_recruited_at": None, - "first_supporter_at": "2023-06-22T08:21:00-07:00", - "first_volunteer_at": None, - "full_name": "Foo Bar", - "home_address": None, - "import_id": None, - "inferred_party": None, - "inferred_support_level": None, - "invoice_payments_amount_in_cents": None, - "invoice_payments_referred_amount_in_cents": None, - "invoices_amount_in_cents": None, - "invoices_count": None, - "is_absentee_voter": None, - "is_active_voter": None, - "is_deceased": False, - "is_donor": False, - "is_dropped_from_file": None, - "is_early_voter": None, - "is_fundraiser": False, - "is_ignore_donation_limits": False, - "is_leaderboardable": True, - "is_mobile_bad": False, - "is_permanent_absentee_voter": None, - "is_possible_duplicate": False, - "is_profile_private": False, - "is_profile_searchable": True, - "is_prospect": False, - "is_supporter": True, - "is_survey_question_private": False, - "language": None, - "last_call_id": None, - "last_contacted_at": None, - "last_contacted_by": None, - "last_donated_at": None, - "last_fundraised_at": None, - 
"last_invoice_at": None, - "last_rule_violation_at": None, - "legal_name": None, - "locale": None, - "mailing_address": None, - "marital_status": None, - "media_market_name": None, - "meetup_id": None, - "meetup_address": None, - "mobile_normalized": None, - "nbec_precinct_code": None, - "nbec_precinct": None, - "note_updated_at": None, - "outstanding_invoices_amount_in_cents": None, - "outstanding_invoices_count": None, - "overdue_invoices_count": None, - "page_slug": None, - "parent": None, - "parent_id": None, - "party_member": None, - "phone_normalized": None, - "phone_time": None, - "precinct_code": None, - "precinct_name": None, - "prefix": None, - "previous_party": None, - "primary_email_id": 1, - "priority_level": None, - "priority_level_changed_at": None, - "profile_content": None, - "profile_content_html": None, - "profile_headline": None, - "received_capital_amount_in_cents": 500, - "recruiter": None, - "recruits_count": 0, - "registered_address": None, - "registered_at": None, - "religion": None, - "rule_violations_count": 0, - "signup_sources": [], - "spent_capital_amount_in_cents": 0, - "submitted_address": None, - "subnations": [], - "suffix": None, - "support_level_changed_at": None, - "support_probability_score": None, - "township": None, - "turnout_probability_score": None, - "twitter_address": None, - "twitter_description": None, - "twitter_followers_count": None, - "twitter_friends_count": None, - "twitter_location": None, - "twitter_login": None, - "twitter_updated_at": None, - "twitter_website": None, - "unsubscribed_at": None, - "user_submitted_address": None, - "username": None, - "voter_updated_at": None, - "warnings_count": 0, - "website": None, - "work_address": None, - }, - "precinct": None, -} diff --git a/test/test_nation_builder/test_nation_builder.py b/test/test_nation_builder/test_nation_builder.py deleted file mode 100644 index 83c52ffe6f..0000000000 --- a/test/test_nation_builder/test_nation_builder.py +++ /dev/null @@ -1,178 +0,0 @@ -import unittest - -import requests_mock - -from parsons import NationBuilder as NB - -from .fixtures import GET_PEOPLE_RESPONSE, PERSON_RESPONSE - - -class TestNationBuilder(unittest.TestCase): - def test_client(self): - nb = NB("test-slug", "test-token") - self.assertEqual(nb.client.uri, "https://test-slug.nationbuilder.com/api/v1/") - self.assertEqual( - nb.client.headers, - { - "authorization": "Bearer test-token", - "Content-Type": "application/json", - "Accept": "application/json", - }, - ) - - def test_get_uri_success(self): - self.assertEqual(NB.get_uri("foo"), "https://foo.nationbuilder.com/api/v1") - self.assertEqual(NB.get_uri("bar"), "https://bar.nationbuilder.com/api/v1") - - def test_get_uri_errors(self): - values = ["", " ", None, 1337, {}, []] - - for v in values: - with self.assertRaises(ValueError): - NB.get_uri(v) - - def test_get_auth_headers_success(self): - self.assertEqual(NB.get_auth_headers("foo"), {"authorization": "Bearer foo"}) - self.assertEqual(NB.get_auth_headers("bar"), {"authorization": "Bearer bar"}) - - def test_get_auth_headers_errors(self): - values = ["", " ", None, 1337, {}, []] - - for v in values: - with self.assertRaises(ValueError): - NB.get_auth_headers(v) - - def test_parse_next_params_success(self): - n, t = NB.parse_next_params("/a/b/c?__nonce=foo&__token=bar") - self.assertEqual(n, "foo") - self.assertEqual(t, "bar") - - def test_get_next_params_errors(self): - with self.assertRaises(ValueError): - NB.parse_next_params("/a/b/c?baz=1") - - with self.assertRaises(ValueError): - 
NB.parse_next_params("/a/b/c?__nonce=1") - - with self.assertRaises(ValueError): - NB.parse_next_params("/a/b/c?__token=1") - - def test_make_next_url(self): - self.assertEqual( - NB.make_next_url("example.com", "bar", "baz"), - "example.com?limit=100&__nonce=bar&__token=baz", - ) - - @requests_mock.Mocker() - def test_get_people_handle_empty_response(self, m): - nb = NB("test-slug", "test-token") - m.get("https://test-slug.nationbuilder.com/api/v1/people", json={"results": []}) - table = nb.get_people() - self.assertEqual(table.num_rows, 0) - - @requests_mock.Mocker() - def test_get_people(self, m): - nb = NB("test-slug", "test-token") - m.get( - "https://test-slug.nationbuilder.com/api/v1/people", - json=GET_PEOPLE_RESPONSE, - ) - table = nb.get_people() - - self.assertEqual(table.num_rows, 2) - self.assertEqual(len(table.columns), 59) - - self.assertEqual(table[0]["first_name"], "Foo") - self.assertEqual(table[0]["last_name"], "Bar") - self.assertEqual(table[0]["email"], "foo@example.com") - - @requests_mock.Mocker() - def test_get_people_with_next(self, m): - """Make two requests and get the same data twice. This will exercise the while loop.""" - nb = NB("test-slug", "test-token") - - GET_PEOPLE_RESPONSE_WITH_NEXT = GET_PEOPLE_RESPONSE.copy() - GET_PEOPLE_RESPONSE_WITH_NEXT[ - "next" - ] = "https://test-slug.nationbuilder.com/api/v1/people?limit=100&__nonce=bar&__token=baz" - - m.get( - "https://test-slug.nationbuilder.com/api/v1/people", - json=GET_PEOPLE_RESPONSE_WITH_NEXT, - ) - - m.get( - "https://test-slug.nationbuilder.com/api/v1/people?limit=100&__nonce=bar&__token=baz", - json=GET_PEOPLE_RESPONSE, - ) - - table = nb.get_people() - - self.assertEqual(table.num_rows, 4) - self.assertEqual(len(table.columns), 59) - - self.assertEqual(table[1]["first_name"], "Zoo") - self.assertEqual(table[1]["last_name"], "Baz") - self.assertEqual(table[1]["email"], "bar@example.com") - - def test_update_person_raises_with_bad_params(self): - nb = NB("test-slug", "test-token") - - with self.assertRaises(ValueError): - nb.update_person(None, {}) - - with self.assertRaises(ValueError): - nb.update_person(1, {}) - - with self.assertRaises(ValueError): - nb.update_person(" ", {}) - - with self.assertRaises(ValueError): - nb.update_person("1", None) - - with self.assertRaises(ValueError): - nb.update_person("1", "bad value") - - @requests_mock.Mocker() - def test_update_person(self, m): - """Requests the correct URL, returns the correct data and doesn't raise exceptions.""" - nb = NB("test-slug", "test-token") - - m.put( - "https://test-slug.nationbuilder.com/api/v1/people/1", - json=PERSON_RESPONSE, - ) - - response = nb.update_person("1", {"tags": ["zoot", "boot"]}) - person = response["person"] - - self.assertEqual(person["id"], 1) - self.assertEqual(person["first_name"], "Foo") - self.assertEqual(person["last_name"], "Bar") - self.assertEqual(person["email"], "foo@example.com") - - def test_upsert_person_raises_with_bad_params(self): - nb = NB("test-slug", "test-token") - - with self.assertRaises(ValueError): - nb.upsert_person({"tags": ["zoot", "boot"]}) - - @requests_mock.Mocker() - def test_upsert_person(self, m): - """Requests the correct URL, returns the correct data and doesn't raise exceptions.""" - nb = NB("test-slug", "test-token") - - m.put( - "https://test-slug.nationbuilder.com/api/v1/people/push", - json=PERSON_RESPONSE, - ) - - created, response = nb.upsert_person({"email": "foo@example.com"}) - self.assertFalse(created) - - person = response["person"] - - 
self.assertEqual(person["id"], 1) - self.assertEqual(person["first_name"], "Foo") - self.assertEqual(person["last_name"], "Bar") - self.assertEqual(person["email"], "foo@example.com") diff --git a/test/test_newmode/test_newmode.py b/test/test_newmode/test_newmode.py index 53c5b7de83..3995b0370d 100644 --- a/test/test_newmode/test_newmode.py +++ b/test/test_newmode/test_newmode.py @@ -1,161 +1,212 @@ import os import unittest import unittest.mock as mock -from parsons import Newmode +from parsons.newmode import Newmode class TestNewmode(unittest.TestCase): + def setUp(self): - os.environ["NEWMODE_API_USER"] = "MYFAKEUSERNAME" - os.environ["NEWMODE_API_PASSWORD"] = "MYFAKEPASSWORD" + os.environ['NEWMODE_API_USER'] = 'MYFAKEUSERNAME' + os.environ['NEWMODE_API_PASSWORD'] = 'MYFAKEPASSWORD' self.nm = Newmode() self.nm.client = mock.MagicMock() self.nm.client.getTools.return_value = [ - {"id": 1, "title": "Tool 1"}, - {"id": 2, "title": "Tool 2"}, + { + 'id': 1, + 'title': 'Tool 1' + }, + { + 'id': 2, + 'title': 'Tool 2' + }, ] - self.nm.client.getTool.return_value = {"id": 1, "name": "Tool 1"} + self.nm.client.getTool.return_value = { + 'id': 1, + 'name': 'Tool 1' + } self.nm.client.getAction.return_value = { - "required_fields": [ + 'required_fields': [ { - "key": "first_name", - "name": "First Name", - "type": "textfield", - "value": "", + 'key': 'first_name', + 'name': 'First Name', + 'type': 'textfield', + 'value': '' } ] } self.nm.client.lookupTargets.return_value = { - "0": {"unique_id": "TESTMODE-uniqueid", "full_name": "John Doe"} + '0': { + 'unique_id': 'TESTMODE-uniqueid', + 'full_name': 'John Doe' + } } - self.nm.client.runAction.return_value = {"sid": 1} + self.nm.client.runAction.return_value = { + 'sid': 1 + } - self.nm.client.getTarget.return_value = {"id": 1, "full_name": "John Doe"} + self.nm.client.getTarget.return_value = { + 'id': 1, + 'full_name': 'John Doe' + } self.nm.client.getCampaigns.return_value = [ - {"id": 1, "title": "Campaign 1"}, - {"id": 2, "title": "Campaign 2"}, + { + 'id': 1, + 'title': 'Campaign 1' + }, + { + 'id': 2, + 'title': 'Campaign 2' + }, ] - self.nm.client.getCampaign.return_value = {"id": 1, "name": "Campaign 1"} + self.nm.client.getCampaign.return_value = { + 'id': 1, + 'name': 'Campaign 1' + } self.nm.client.getOrganizations.return_value = [ - {"id": 1, "title": "Organization 1"}, - {"id": 2, "title": "Organization 2"}, + { + 'id': 1, + 'title': 'Organization 1' + }, + { + 'id': 2, + 'title': 'Organization 2' + }, ] self.nm.client.getOrganization.return_value = { - "id": 1, - "name": "Organization 1", + 'id': 1, + 'name': 'Organization 1' } self.nm.client.getServices.return_value = [ - {"id": 1, "title": "Service 1"}, - {"id": 2, "title": "Service 2"}, + { + 'id': 1, + 'title': 'Service 1' + }, + { + 'id': 2, + 'title': 'Service 2' + }, ] - self.nm.client.getService.return_value = {"id": 1, "name": "Service 1"} + self.nm.client.getService.return_value = { + 'id': 1, + 'name': 'Service 1' + } self.nm.client.getOutreaches.return_value = [ - {"id": 1, "title": "Outreach 1"}, - {"id": 2, "title": "Outreach 2"}, + { + 'id': 1, + 'title': 'Outreach 1' + }, + { + 'id': 2, + 'title': 'Outreach 2' + }, ] - self.nm.client.getOutreach.return_value = {"id": 1, "name": "Outreach 1"} + self.nm.client.getOutreach.return_value = { + 'id': 1, + 'name': 'Outreach 1' + } def test_get_tools(self): args = {} response = self.nm.get_tools(args) self.nm.client.getTools.assert_called_with(params=args) - self.assertEqual(response[0]["title"], "Tool 1") + 
self.assertEqual(response[0]['title'], 'Tool 1') def test_get_tool(self): id = 1 response = self.nm.get_tool(id) self.nm.client.getTool.assert_called_with(id, params={}) - self.assertEqual(response["name"], "Tool 1") + self.assertEqual(response['name'], 'Tool 1') def test_lookup_targets(self): id = 1 response = self.nm.lookup_targets(id) self.nm.client.lookupTargets.assert_called_with(id, None, params={}) - self.assertEqual(response[0]["full_name"], "John Doe") + self.assertEqual(response[0]['full_name'], 'John Doe') def test_get_action(self): id = 1 response = self.nm.get_action(id) self.nm.client.getAction.assert_called_with(id, params={}) - self.assertEqual(response["required_fields"][0]["key"], "first_name") + self.assertEqual(response['required_fields'][0]['key'], 'first_name') def test_run_action(self): id = 1 payload = { - "email": "john.doe@example.com", - "first_name": "John", + 'email': 'john.doe@example.com', + 'first_name': 'John', } response = self.nm.run_action(id, payload) self.nm.client.runAction.assert_called_with(id, payload, params={}) self.assertEqual(response, 1) def test_get_target(self): - id = "TESTMODE-aasfff" + id = 'TESTMODE-aasfff' response = self.nm.get_target(id) self.nm.client.getTarget.assert_called_with(id, params={}) - self.assertEqual(response["id"], 1) - self.assertEqual(response["full_name"], "John Doe") + self.assertEqual(response['id'], 1) + self.assertEqual(response['full_name'], 'John Doe') def test_get_campaigns(self): args = {} response = self.nm.get_campaigns(args) self.nm.client.getCampaigns.assert_called_with(params=args) - self.assertEqual(response[0]["title"], "Campaign 1") + self.assertEqual(response[0]['title'], 'Campaign 1') def test_get_campaign(self): id = 1 response = self.nm.get_campaign(id) self.nm.client.getCampaign.assert_called_with(id, params={}) - self.assertEqual(response["name"], "Campaign 1") + self.assertEqual(response['name'], 'Campaign 1') def test_get_organizations(self): args = {} response = self.nm.get_organizations(args) self.nm.client.getOrganizations.assert_called_with(params=args) - self.assertEqual(response[0]["title"], "Organization 1") + self.assertEqual(response[0]['title'], 'Organization 1') def test_get_organization(self): id = 1 response = self.nm.get_organization(id) self.nm.client.getOrganization.assert_called_with(id, params={}) - self.assertEqual(response["name"], "Organization 1") + self.assertEqual(response['name'], 'Organization 1') def test_get_services(self): args = {} response = self.nm.get_services(args) self.nm.client.getServices.assert_called_with(params=args) - self.assertEqual(response[0]["title"], "Service 1") + self.assertEqual(response[0]['title'], 'Service 1') def test_get_service(self): id = 1 response = self.nm.get_service(id) self.nm.client.getService.assert_called_with(id, params={}) - self.assertEqual(response["name"], "Service 1") + self.assertEqual(response['name'], 'Service 1') def test_get_outreaches(self): id = 1 args = {} response = self.nm.get_outreaches(id, args) self.nm.client.getOutreaches.assert_called_with(id, params=args) - self.assertEqual(response[0]["title"], "Outreach 1") + self.assertEqual(response[0]['title'], 'Outreach 1') def test_get_outreach(self): id = 1 response = self.nm.get_outreach(id) self.nm.client.getOutreach.assert_called_with(id, params={}) - self.assertEqual(response["name"], "Outreach 1") + self.assertEqual(response['name'], 'Outreach 1') diff --git a/test/test_p2a.py b/test/test_p2a.py index 3116447a66..0d0db10644 100644 --- a/test/test_p2a.py +++ 
b/test/test_p2a.py @@ -1,11 +1,10 @@ import unittest import requests_mock from test.utils import validate_list -from parsons import Phone2Action +from parsons.phone2action import Phone2Action import os import copy - adv_json = { "data": [ { @@ -33,13 +32,13 @@ "zip4": 9534, "county": "Tehama", "latitude": "50.0632635", - "longitude": "-122.09654", + "longitude": "-122.09654" }, "districts": { "congressional": "1", "stateSenate": "4", "stateHouse": "3", - "cityCouncil": None, + "cityCouncil": None }, "ids": [], "memberships": [ @@ -56,24 +55,35 @@ "name": "20180524 March for America", "source": None, "created_at": "2018-05-24 21:09:49.000000", - }, + } ], "fields": [], "phones": [ - {"id": 10537860, "address": "+19995206447", "subscribed": "false"} + { + "id": 10537860, + "address": "+19995206447", + "subscribed": 'false' + } ], "emails": [ - {"id": 10537871, "address": "N@k.com", "subscribed": "false"}, - {"id": 10950446, "address": "email@me.com", "subscribed": "false"}, - ], + { + "id": 10537871, + "address": "N@k.com", + "subscribed": 'false' + }, + { + "id": 10950446, + "address": "email@me.com", + "subscribed": 'false' + } + ] } ], "pagination": { "count": 1, "per_page": 100, "current_page": 1, - "next_url": "https://api.phone2action.com/2.0/advocates?page=2", - }, + "next_url": "https://api.phone2action.com/2.0/advocates?page=2"} } camp_json = [ @@ -93,26 +103,30 @@ "call_to_action": "Contact your officials in one click!", "thank_you": "
<p>Thanks for taking action. Please encourage others to act by " "sharing on social media.</p>
", - "background_image": None, + "background_image": None }, "updated_at": { "date": "2017-11-21 23:27:11.000000", "timezone_type": 3, - "timezone": "UTC", - }, + "timezone": "UTC" + } } ] def parse_request_body(m): - kvs = m.split("&") - return {kv.split("=")[0]: kv.split("=")[1] for kv in kvs} + kvs = m.split('&') + return { + kv.split('=')[0]: kv.split('=')[1] + for kv in kvs + } class TestP2A(unittest.TestCase): + def setUp(self): - self.p2a = Phone2Action(app_id="an_id", app_key="app_key") + self.p2a = Phone2Action(app_id='an_id', app_key='app_key') def tearDown(self): @@ -127,201 +141,146 @@ def test_init_args(self): def test_init_envs(self): # Test initilizing class with envs - os.environ["PHONE2ACTION_APP_ID"] = "id" - os.environ["PHONE2ACTION_APP_KEY"] = "key" + os.environ['PHONE2ACTION_APP_ID'] = 'id' + os.environ['PHONE2ACTION_APP_KEY'] = 'key' p2a_envs = Phone2Action() - self.assertEqual(p2a_envs.app_id, "id") - self.assertEqual(p2a_envs.app_key, "key") + self.assertEqual(p2a_envs.app_id, 'id') + self.assertEqual(p2a_envs.app_key, 'key') @requests_mock.Mocker() def test_get_advocates(self, m): - m.get(self.p2a.client.uri + "advocates", json=adv_json) - - adv_exp = [ - "id", - "prefix", - "firstname", - "middlename", - "lastname", - "suffix", - "notes", - "stage", - "connections", - "created_at", - "updated_at", - "address_city", - "address_county", - "address_latitude", - "address_longitude", - "address_state", - "address_street1", - "address_street2", - "address_zip4", - "address_zip5", - "districts_cityCouncil", - "districts_congressional", - "districts_stateHouse", - "districts_stateSenate", - ] - - self.assertTrue(validate_list(adv_exp, self.p2a.get_advocates()["advocates"])) - ids_exp = ["advocate_id", "ids"] - - self.assertTrue(validate_list(ids_exp, self.p2a.get_advocates()["ids"])) - - phone_exp = ["advocate_id", "phones_address", "phones_id", "phones_subscribed"] - self.assertTrue(validate_list(phone_exp, self.p2a.get_advocates()["phones"])) - - tags_exp = ["advocate_id", "tags"] - self.assertTrue(validate_list(tags_exp, self.p2a.get_advocates()["tags"])) - - email_exp = ["advocate_id", "emails_address", "emails_id", "emails_subscribed"] - self.assertTrue(validate_list(email_exp, self.p2a.get_advocates()["emails"])) - - member_exp = [ - "advocate_id", - "memberships_campaignid", - "memberships_created_at", - "memberships_id", - "memberships_name", - "memberships_source", - ] - self.assertTrue( - validate_list(member_exp, self.p2a.get_advocates()["memberships"]) - ) - - fields_exp = ["advocate_id", "fields"] - self.assertTrue(validate_list(fields_exp, self.p2a.get_advocates()["fields"])) + m.get(self.p2a.client.uri + 'advocates', json=adv_json) + + adv_exp = ['id', 'prefix', 'firstname', 'middlename', + 'lastname', 'suffix', 'notes', 'stage', 'connections', + 'created_at', 'updated_at', + 'address_city', 'address_county', 'address_latitude', + 'address_longitude', 'address_state', 'address_street1', + 'address_street2', 'address_zip4', 'address_zip5', + 'districts_cityCouncil', 'districts_congressional', + 'districts_stateHouse', 'districts_stateSenate'] + + self.assertTrue(validate_list(adv_exp, self.p2a.get_advocates()['advocates'])) + ids_exp = ['advocate_id', 'ids'] + + self.assertTrue(validate_list(ids_exp, self.p2a.get_advocates()['ids'])) + + phone_exp = ['advocate_id', 'phones_address', 'phones_id', 'phones_subscribed'] + self.assertTrue(validate_list(phone_exp, self.p2a.get_advocates()['phones'])) + + tags_exp = ['advocate_id', 'tags'] + 
self.assertTrue(validate_list(tags_exp, self.p2a.get_advocates()['tags'])) + + email_exp = ['advocate_id', 'emails_address', 'emails_id', 'emails_subscribed'] + self.assertTrue(validate_list(email_exp, self.p2a.get_advocates()['emails'])) + + member_exp = ['advocate_id', 'memberships_campaignid', 'memberships_created_at', + 'memberships_id', 'memberships_name', 'memberships_source'] + self.assertTrue(validate_list(member_exp, self.p2a.get_advocates()['memberships'])) + + fields_exp = ['advocate_id', 'fields'] + self.assertTrue(validate_list(fields_exp, self.p2a.get_advocates()['fields'])) @requests_mock.Mocker() def test_get_advocates__by_page(self, m): response = copy.deepcopy(adv_json) # Make it look like there's more data - response["pagination"]["count"] = 100 + response['pagination']['count'] = 100 - m.get(self.p2a.client.uri + "advocates?page=1", json=adv_json) - m.get( - self.p2a.client.uri + "advocates?page=2", - exc=Exception("Should only call once"), - ) + m.get(self.p2a.client.uri + 'advocates?page=1', json=adv_json) + m.get(self.p2a.client.uri + 'advocates?page=2', exc=Exception('Should only call once')) results = self.p2a.get_advocates(page=1) - self.assertTrue(results["advocates"].num_rows, 1) + self.assertTrue(results['advocates'].num_rows, 1) @requests_mock.Mocker() def test_get_advocates__empty(self, m): response = copy.deepcopy(adv_json) - response["data"] = [] + response['data'] = [] # Make it look like there's more data - response["pagination"]["count"] = 0 + response['pagination']['count'] = 0 - m.get(self.p2a.client.uri + "advocates", json=adv_json) + m.get(self.p2a.client.uri + 'advocates', json=adv_json) results = self.p2a.get_advocates() - self.assertTrue(results["advocates"].num_rows, 0) + self.assertTrue(results['advocates'].num_rows, 0) @requests_mock.Mocker() def test_get_campaigns(self, m): - camp_exp = [ - "id", - "name", - "display_name", - "subtitle", - "public", - "topic", - "type", - "link", - "restrict_allow", - "updated_at_date", - "updated_at_timezone", - "updated_at_timezone_type", - "content_background_image", - "content_call_to_action", - "content_introduction", - "content_summary", - "content_thank_you", - ] - - m.get(self.p2a.client.uri + "campaigns", json=camp_json) + camp_exp = ['id', 'name', 'display_name', 'subtitle', + 'public', 'topic', 'type', 'link', 'restrict_allow', + 'updated_at_date', 'updated_at_timezone', + 'updated_at_timezone_type', 'content_background_image', + 'content_call_to_action', 'content_introduction', + 'content_summary', 'content_thank_you'] + + m.get(self.p2a.client.uri + 'campaigns', json=camp_json) self.assertTrue(validate_list(camp_exp, self.p2a.get_campaigns())) @requests_mock.Mocker() def test_create_advocate(self, m): - m.post(self.p2a.client.uri + "advocates", json={"advocateid": 1}) + m.post(self.p2a.client.uri + 'advocates', json={'advocateid': 1}) # Test arg validation - create requires a phone or an email - self.assertRaises( - ValueError, - lambda: self.p2a.create_advocate( - campaigns=[1], firstname="Foo", lastname="bar" - ), - ) + self.assertRaises(ValueError, + lambda: self.p2a.create_advocate(campaigns=[1], + firstname='Foo', + lastname='bar')) # Test arg validation - sms opt in requires a phone - self.assertRaises( - ValueError, - lambda: self.p2a.create_advocate( - campaigns=[1], email="foo@bar.com", sms_optin=True - ), - ) + self.assertRaises(ValueError, + lambda: self.p2a.create_advocate(campaigns=[1], + email='foo@bar.com', + sms_optin=True)) # Test arg validation - email opt in requires a email - 
self.assertRaises( - ValueError, - lambda: self.p2a.create_advocate( - campaigns=[1], phone="1234567890", email_optin=True - ), - ) + self.assertRaises(ValueError, + lambda: self.p2a.create_advocate(campaigns=[1], + phone='1234567890', + email_optin=True)) # Test a successful call - advocateid = self.p2a.create_advocate( - campaigns=[1], email="foo@bar.com", email_optin=True, firstname="Test" - ) + advocateid = self.p2a.create_advocate(campaigns=[1], + email='foo@bar.com', + email_optin=True, + firstname='Test') self.assertTrue(m.called) self.assertEqual(advocateid, 1) # Check that the properties were mapped data = parse_request_body(m.last_request.text) - self.assertEqual(data["firstname"], "Test") - self.assertNotIn("lastname", data) - self.assertEqual(data["emailOptin"], "1") - self.assertEqual(data["email"], "foo%40bar.com") + self.assertEqual(data['firstname'], 'Test') + self.assertNotIn('lastname', data) + self.assertEqual(data['emailOptin'], '1') + self.assertEqual(data['email'], 'foo%40bar.com') @requests_mock.Mocker() def test_update_advocate(self, m): - m.post(self.p2a.client.uri + "advocates") + m.post(self.p2a.client.uri + 'advocates') # Test arg validation - sms opt in requires a phone - self.assertRaises( - ValueError, lambda: self.p2a.update_advocate(advocate_id=1, sms_optin=True) - ) + self.assertRaises(ValueError, + lambda: self.p2a.update_advocate(advocate_id=1, sms_optin=True)) # Test arg validation - email opt in requires a email - self.assertRaises( - ValueError, - lambda: self.p2a.update_advocate(advocate_id=1, email_optin=True), - ) + self.assertRaises(ValueError, + lambda: self.p2a.update_advocate(advocate_id=1, email_optin=True)) # Test a successful call - self.p2a.update_advocate( - advocate_id=1, - campaigns=[1], - email="foo@bar.com", - email_optin=True, - firstname="Test", - ) + self.p2a.update_advocate(advocate_id=1, campaigns=[1], email='foo@bar.com', + email_optin=True, firstname='Test') self.assertTrue(m.called) # Check that the properties were mapped data = parse_request_body(m.last_request.text) - self.assertEqual(data["firstname"], "Test") - self.assertNotIn("lastname", data) - self.assertEqual(data["emailOptin"], "1") - self.assertEqual(data["email"], "foo%40bar.com") + self.assertEqual(data['firstname'], 'Test') + self.assertNotIn('lastname', data) + self.assertEqual(data['emailOptin'], '1') + self.assertEqual(data['email'], 'foo%40bar.com') diff --git a/test/test_pdi/conftest.py b/test/test_pdi/conftest.py index 5eb510e022..7077c684c1 100644 --- a/test/test_pdi/conftest.py +++ b/test/test_pdi/conftest.py @@ -8,9 +8,9 @@ def live_pdi(): # Generate a live PDI connection based on these env vars - username = os.environ["PDI_USERNAME"] - password = os.environ["PDI_PASSWORD"] - api_token = os.environ["PDI_API_TOKEN"] + username = os.environ['PDI_USERNAME'] + password = os.environ['PDI_PASSWORD'] + api_token = os.environ['PDI_API_TOKEN'] pdi = PDI(username, password, api_token, qa_url=True) @@ -22,12 +22,10 @@ def mock_pdi(requests_mock): # Not meant to hit live api servers requests_mock.post( - "https://apiqa.bluevote.com/sessions", - json={ + "https://apiqa.bluevote.com/sessions", json={ "AccessToken": "AccessToken", "ExpirationDate": "2100-01-01", - }, - ) + }) username = "PDI_USERNAME" password = "PDI_PASSWORD" diff --git a/test/test_pdi/test_events.py b/test/test_pdi/test_events.py deleted file mode 100644 index 6f2de7656c..0000000000 --- a/test/test_pdi/test_events.py +++ /dev/null @@ -1,62 +0,0 @@ -from test.utils import mark_live_test -from parsons 
import Table - - -##### - -START_DATE = "2020-01-01" -END_DATE = "2022-12-31" -EXPAND = True -LOWER_LIMIT = 1 - -# TODO: Invoke this, it should fail as 2000 is the max limit for -# all of the relevant events functions -UPPER_LIMIT = 2001 - - -@mark_live_test -def test_get_calendars(live_pdi): - response = live_pdi.get_calendars() - - assert type(response) == Table - - -@mark_live_test -def test_get_calendars_with_limit(live_pdi): - response = live_pdi.get_calendars(limit=LOWER_LIMIT) - - assert response.num_rows == 1 - - -@mark_live_test -def test_get_event_activities(live_pdi): - response = live_pdi.get_event_activities(start_date=START_DATE, end_date=END_DATE) - - assert type(response) == Table - - -@mark_live_test -def test_get_event_activities_with_limit(live_pdi): - response = live_pdi.get_event_activities( - start_date=START_DATE, end_date=END_DATE, limit=LOWER_LIMIT - ) - - assert response.num_rows == 1 - - -@mark_live_test -def test_get_event_activity_assignments(live_pdi): - response = live_pdi.get_event_activity_assignments( - start_date=START_DATE, end_date=END_DATE, expand=EXPAND - ) - - assert type(response) == Table - - -@mark_live_test -def test_get_event_activity_assignments_with_limit(live_pdi): - response = live_pdi.get_event_activity_assignments( - start_date=START_DATE, end_date=END_DATE, expand=EXPAND - ) - - assert response.num_rows == 1 diff --git a/test/test_pdi/test_flag_ids.py b/test/test_pdi/test_flag_ids.py index e69ce5e9c8..307ea2b004 100644 --- a/test/test_pdi/test_flag_ids.py +++ b/test/test_pdi/test_flag_ids.py @@ -4,7 +4,6 @@ from contextlib import contextmanager from requests.exceptions import HTTPError - # import json import pytest @@ -59,7 +58,8 @@ def temp_flag_id(pdi, my_flag_id=None): def test_get_flag_ids(live_pdi, limit): flag_ids = live_pdi.get_flag_ids(limit=limit) - expected_columns = ["id", "flagId", "flagIdDescription", "compile", "isDefault"] + expected_columns = [ + "id", "flagId", "flagIdDescription", "compile", "isDefault"] expected_num_rows = limit or QA_NUM_FLAG_IDS assert isinstance(flag_ids, Table) @@ -70,15 +70,14 @@ def test_get_flag_ids(live_pdi, limit): @mark_live_test @pytest.mark.parametrize( "id", - [ - pytest.param(QA_REAL_FLAG_ID), - pytest.param(QA_INVALID_FLAG_ID, marks=[xfail_http_error]), - ], -) + [pytest.param(QA_REAL_FLAG_ID), + pytest.param(QA_INVALID_FLAG_ID, marks=[xfail_http_error]), + ]) def test_get_flag_id(live_pdi, id): flag_id = live_pdi.get_flag_id(id) - expected_keys = ["id", "flagId", "flagIdDescription", "compile", "isDefault"] + expected_keys = [ + "id", "flagId", "flagIdDescription", "compile", "isDefault"] assert isinstance(flag_id, dict) assert list(flag_id.keys()) == expected_keys @@ -87,12 +86,10 @@ def test_get_flag_id(live_pdi, id): @mark_live_test @pytest.mark.parametrize( ["flag_id", "is_default"], - [ - pytest.param(None, True, marks=[xfail_http_error]), - pytest.param("amm", None, marks=[xfail_http_error]), - pytest.param("amm", True), - ], -) + [pytest.param(None, True, marks=[xfail_http_error]), + pytest.param("amm", None, marks=[xfail_http_error]), + pytest.param("amm", True), + ]) def test_create_flag_id(live_pdi, cleanup_flag_id, flag_id, is_default): flag_id = live_pdi.create_flag_id(flag_id, is_default) @@ -102,12 +99,10 @@ def test_create_flag_id(live_pdi, cleanup_flag_id, flag_id, is_default): @mark_live_test @pytest.mark.parametrize( ["my_flag_id"], - [ - pytest.param(None), - pytest.param(QA_INVALID_FLAG_ID), - pytest.param(QA_MALFORMED_FLAG_ID, marks=[xfail_http_error]), - ], -) + 
[pytest.param(None), + pytest.param(QA_INVALID_FLAG_ID), + pytest.param(QA_MALFORMED_FLAG_ID, marks=[xfail_http_error]), + ]) def test_delete_flag_id(live_pdi, create_temp_flag_id, my_flag_id): with create_temp_flag_id(live_pdi, my_flag_id) as flag_id: did_delete = live_pdi.delete_flag_id(flag_id) @@ -118,12 +113,10 @@ def test_delete_flag_id(live_pdi, create_temp_flag_id, my_flag_id): @mark_live_test @pytest.mark.parametrize( ["my_flag_id"], - [ - pytest.param(None), - pytest.param(QA_INVALID_FLAG_ID, marks=[xfail_http_error]), - pytest.param(QA_MALFORMED_FLAG_ID, marks=[xfail_http_error]), - ], -) + [pytest.param(None), + pytest.param(QA_INVALID_FLAG_ID, marks=[xfail_http_error]), + pytest.param(QA_MALFORMED_FLAG_ID, marks=[xfail_http_error]), + ]) def test_update_flag_id(live_pdi, create_temp_flag_id, my_flag_id): with create_temp_flag_id(live_pdi, my_flag_id) as flag_id: # flag initial state: @@ -136,8 +129,7 @@ def test_update_flag_id(live_pdi, create_temp_flag_id, my_flag_id): "flagId": "bnh", "flagIdDescription": None, "compile": "", - "isDefault": True, - } + "isDefault": True} flag_id_dict = live_pdi.get_flag_id(flag_id) diff --git a/test/test_pdi/test_pdi.py b/test/test_pdi/test_pdi.py index 6343515853..3dfcc0ef70 100644 --- a/test/test_pdi/test_pdi.py +++ b/test/test_pdi/test_pdi.py @@ -29,13 +29,11 @@ def test_connection(): @pytest.mark.parametrize( ["username", "password", "api_token"], - [ - (None, None, None), - (None, "pass", "token"), - ("user", None, "token"), - ("user", "pass", None), - ], -) + [(None, None, None), + (None, "pass", "token"), + ("user", None, "token"), + ("user", "pass", None), + ]) def test_init_error(username, password, api_token): remove_from_env("PDI_USERNAME", "PDI_PASSWORD", "PDI_API_TOKEN") with pytest.raises(KeyError): @@ -44,14 +42,10 @@ def test_init_error(username, password, api_token): @pytest.mark.parametrize( ["obj", "exp_obj"], - [ - ({"a": "a", "b": None, "c": "c"}, {"a": "a", "c": "c"}), - ( - [{"a": "a", "b": None, "c": "c"}, {"a": "a", "c": None}], - [{"a": "a", "c": "c"}, {"a": "a"}], - ), - ("string", "string"), - ], -) + [({"a": "a", "b": None, "c": "c"}, {"a": "a", "c": "c"}), + ([{"a": "a", "b": None, "c": "c"}, {"a": "a", "c": None}], + [{"a": "a", "c": "c"}, {"a": "a"}]), + ("string", "string"), + ]) def test_clean_dict(mock_pdi, obj, exp_obj): assert mock_pdi._clean_dict(obj) == exp_obj diff --git a/test/test_quickbase/test_data.py b/test/test_quickbase/test_data.py index f21fd68da0..128d70b4ad 100644 --- a/test/test_quickbase/test_data.py +++ b/test/test_quickbase/test_data.py @@ -1,70 +1,140 @@ test_get_app_tables = [ + { + "alias": "_DBID_MEMBERSHIP", + "created": "2020-09-01T20:16:15Z", + "defaultSortFieldId": 4, + "defaultSortOrder": "ASC", + "description": "", + "id": "abcdef", + "keyFieldId": 3, + "name": "Members", + "nextFieldId": 100, + "nextRecordId": 250, + "pluralRecordName": "Members", + "singleRecordName": "Member", + "sizeLimit": "500 MB", + "spaceRemaining": "500 MB", + "spaceUsed": "50 KB", + "updated": "2020-10-11T15:25:53Z" + }, + { + "alias": "_DBID_MEETINGS", + "created": "2020-07-10T03:16:15Z", + "defaultSortFieldId": 7, + "defaultSortOrder": "DESC", + "description": "", + "id": "brqdmcesd", + "keyFieldId": 7, + "name": "Meetings", + "nextFieldId": 18, + "nextRecordId": 81, + "pluralRecordName": "Meetings", + "singleRecordName": "Meeting", + "sizeLimit": "500 MB", + "spaceRemaining": "500 MB", + "spaceUsed": "100 KB", + "updated": "2020-09-11T14:17:23Z" + }] + +test_query_records = { + "data": [ + { + "1": 
{ + "value": "2020-01-31T15:13:35Z" + }, + "11": { + "value": "" + }, + "5": { + "value": "" + }, + "10": { + "value": "" + }, + "2": { + "value": "First name" + }, + "12": { + "value": "" + }, + "3": { + "value": "Last name" + }, + "6": { + "value": "exampleemail@example.com" + }, + "4": { + "value": "(555) 555-5555" + }, + "9": { + "value": "Wirdd" + }, + "8": { + "value": "99999" + } + } + ], + "fields": [ { - "alias": "_DBID_MEMBERSHIP", - "created": "2020-09-01T20:16:15Z", - "defaultSortFieldId": 4, - "defaultSortOrder": "ASC", - "description": "", - "id": "abcdef", - "keyFieldId": 3, - "name": "Members", - "nextFieldId": 100, - "nextRecordId": 250, - "pluralRecordName": "Members", - "singleRecordName": "Member", - "sizeLimit": "500 MB", - "spaceRemaining": "500 MB", - "spaceUsed": "50 KB", - "updated": "2020-10-11T15:25:53Z", + "id": 1, + "label": "Date Created", + "type": "timestamp" }, { - "alias": "_DBID_MEETINGS", - "created": "2020-07-10T03:16:15Z", - "defaultSortFieldId": 7, - "defaultSortOrder": "DESC", - "description": "", - "id": "brqdmcesd", - "keyFieldId": 7, - "name": "Meetings", - "nextFieldId": 18, - "nextRecordId": 81, - "pluralRecordName": "Meetings", - "singleRecordName": "Meeting", - "sizeLimit": "500 MB", - "spaceRemaining": "500 MB", - "spaceUsed": "100 KB", - "updated": "2020-09-11T14:17:23Z", + "id": 2, + "label": "First Name", + "type": "text" }, -] - -test_query_records = { - "data": [ - { - "1": {"value": "2020-01-31T15:13:35Z"}, - "11": {"value": ""}, - "5": {"value": ""}, - "10": {"value": ""}, - "2": {"value": "First name"}, - "12": {"value": ""}, - "3": {"value": "Last name"}, - "6": {"value": "exampleemail@example.com"}, - "4": {"value": "(555) 555-5555"}, - "9": {"value": "Wirdd"}, - "8": {"value": "99999"}, - } - ], - "fields": [ - {"id": 1, "label": "Date Created", "type": "timestamp"}, - {"id": 2, "label": "First Name", "type": "text"}, - {"id": 3, "label": "Last Name", "type": "text"}, - {"id": 4, "label": "Phone Number", "type": "phone"}, - {"id": 5, "label": "Address: City", "type": "text"}, - {"id": 6, "label": "Email", "type": "email"}, - {"id": 8, "label": "ZIP Code", "type": "text"}, - {"id": 9, "label": "City", "type": "text"}, - {"id": 10, "label": "State/Region", "type": "text-multiple-choice"}, - {"id": 11, "label": "Street 1", "type": "text"}, - {"id": 12, "label": "Gender Identity", "type": "text-multi-line"}, - ], - "metadata": {"numFields": 11, "numRecords": 1, "skip": 0, "totalRecords": 1}, + { + "id": 3, + "label": "Last Name", + "type": "text" + }, + { + "id": 4, + "label": "Phone Number", + "type": "phone" + }, + { + "id": 5, + "label": "Address: City", + "type": "text" + }, + { + "id": 6, + "label": "Email", + "type": "email" + }, + { + "id": 8, + "label": "ZIP Code", + "type": "text" + }, + { + "id": 9, + "label": "City", + "type": "text" + }, + { + "id": 10, + "label": "State/Region", + "type": "text-multiple-choice" + }, + { + "id": 11, + "label": "Street 1", + "type": "text" + }, + { + "id": 12, + "label": "Gender Identity", + "type": "text-multi-line" + } + ], + "metadata": { + "numFields": 11, + "numRecords": 1, + "skip": 0, + "totalRecords": 1 + } } diff --git a/test/test_quickbase/test_quickbase.py b/test/test_quickbase/test_quickbase.py index 8c23d75e4b..6b03f66ccc 100644 --- a/test/test_quickbase/test_quickbase.py +++ b/test/test_quickbase/test_quickbase.py @@ -1,25 +1,26 @@ +from parsons.quickbase.quickbase import Quickbase +from test.test_quickbase import test_data import unittest import requests_mock -from parsons 
import Quickbase -from test.test_quickbase import test_data class TestQuickbase(unittest.TestCase): + @requests_mock.Mocker() def test_get_app_tables(self, m): - qb = Quickbase(hostname="test.example.com", user_token="12345") - m.get( - f"{qb.api_hostname}/tables?appId=test", json=test_data.test_get_app_tables - ) - tbl = qb.get_app_tables(app_id="test") + qb = Quickbase(hostname='test.example.com', user_token='12345') + m.get(f'{qb.api_hostname}/tables?appId=test', + json=test_data.test_get_app_tables) + tbl = qb.get_app_tables(app_id='test') self.assertEqual(tbl.num_rows, 2) @requests_mock.Mocker() def test_query_records(self, m): - qb = Quickbase(hostname="test.example.com", user_token="12345") - m.post(f"{qb.api_hostname}/records/query", json=test_data.test_query_records) - tbl = qb.query_records(table_from="test_table") + qb = Quickbase(hostname='test.example.com', user_token='12345') + m.post(f'{qb.api_hostname}/records/query', + json=test_data.test_query_records) + tbl = qb.query_records(table_from='test_table') self.assertEqual(tbl.num_rows, 1) diff --git a/test/test_redash.py b/test/test_redash.py index a81a23882f..338d2f335c 100644 --- a/test/test_redash.py +++ b/test/test_redash.py @@ -2,130 +2,86 @@ import unittest import requests_mock from test.utils import assert_matching_tables -from parsons import Table, Redash -from parsons.redash.redash import RedashTimeout +from parsons.etl.table import Table +from parsons.redash.redash import Redash, RedashTimeout -BASE_URL = "https://redash.example.com" -API_KEY = "abc123" +BASE_URL = 'https://redash.example.com' +API_KEY = 'abc123' class TestRedash(unittest.TestCase): - mock_data = "foo,bar\n1,2\n3,4" - mock_data_source = { - "id": 1, - "name": "Data Source 1", - "type": "redshift", - "options": { - "dbname": "db_name", - "host": "host.example.com", - "password": "--------", - "port": 5439, - "user": "username", - }, - } - mock_result = Table([("foo", "bar"), ("1", "2"), ("3", "4")]) + mock_data = 'foo,bar\n1,2\n3,4' + mock_result = Table([('foo', 'bar'), ('1', '2'), ('3', '4')]) def setUp(self): self.redash = Redash(BASE_URL, API_KEY) - @requests_mock.Mocker() - def test_get_data_source(self, m): - m.get(f"{BASE_URL}/api/data_sources/1", json=self.mock_data_source) - assert self.redash.get_data_source(1) == self.mock_data_source - - @requests_mock.Mocker() - def test_update_data_source(self, m): - m.post(f"{BASE_URL}/api/data_sources/1", json=self.mock_data_source) - self.redash.update_data_source( - 1, - "Data Source 1", - "redshift", - "db_name", - "host.example.com", - "password", - 5439, - "username", - ) - assert m.call_count == 1 - @requests_mock.Mocker() def test_cached_query(self, m): redash = Redash(BASE_URL) # no user_api_key - m.get(f"{BASE_URL}/api/queries/5/results.csv", text=self.mock_data) - assert_matching_tables( - redash.get_cached_query_results(5, API_KEY), self.mock_result - ) - self.assertEqual(m._adapter.last_request.path, "/api/queries/5/results.csv") - self.assertEqual(m._adapter.last_request.query, "api_key=abc123") - - assert_matching_tables( - self.redash.get_cached_query_results(5), self.mock_result - ) - self.assertEqual(m._adapter.last_request.query, "") + m.get(f'{BASE_URL}/api/queries/5/results.csv', text=self.mock_data) + assert_matching_tables(redash.get_cached_query_results(5, API_KEY), + self.mock_result) + self.assertEqual(m._adapter.last_request.path, '/api/queries/5/results.csv') + self.assertEqual(m._adapter.last_request.query, 'api_key=abc123') + + 
assert_matching_tables(self.redash.get_cached_query_results(5), + self.mock_result) + self.assertEqual(m._adapter.last_request.query, '') @requests_mock.Mocker() def test_refresh_query(self, m): - m.post( - f"{BASE_URL}/api/queries/5/refresh", - json={"job": {"status": 3, "query_result_id": 21}}, - ) - m.get(f"{BASE_URL}/api/queries/5/results/21.csv", text=self.mock_data) + m.post(f'{BASE_URL}/api/queries/5/refresh', json={ + 'job': {'status': 3, 'query_result_id': 21}}) + m.get(f'{BASE_URL}/api/queries/5/results/21.csv', text=self.mock_data) - assert_matching_tables( - self.redash.get_fresh_query_results(5, {"yyy": "xxx"}), self.mock_result - ) + assert_matching_tables(self.redash.get_fresh_query_results(5, {'yyy': 'xxx'}), + self.mock_result) @requests_mock.Mocker() def test_refresh_query_poll(self, m): - m.post( - f"{BASE_URL}/api/queries/5/refresh", json={"job": {"id": 66, "status": 1}} - ) - m.get( - f"{BASE_URL}/api/jobs/66", - json={"job": {"id": 66, "status": 3, "query_result_id": 21}}, - ) - m.get(f"{BASE_URL}/api/queries/5/results/21.csv", text=self.mock_data) + m.post(f'{BASE_URL}/api/queries/5/refresh', json={ + 'job': {'id': 66, 'status': 1}}) + m.get(f'{BASE_URL}/api/jobs/66', json={ + 'job': {'id': 66, 'status': 3, 'query_result_id': 21}}) + m.get(f'{BASE_URL}/api/queries/5/results/21.csv', text=self.mock_data) self.redash.pause = 0.01 # shorten pause time - assert_matching_tables( - self.redash.get_fresh_query_results(5, {"yyy": "xxx"}), self.mock_result - ) + assert_matching_tables(self.redash.get_fresh_query_results(5, {'yyy': 'xxx'}), + self.mock_result) @requests_mock.Mocker() def test_refresh_query_poll_timeout(self, m): - m.post( - f"{BASE_URL}/api/queries/5/refresh", json={"job": {"id": 66, "status": 1}} - ) - m.get(f"{BASE_URL}/api/jobs/66", json={"job": {"id": 66, "status": 1}}) - m.get(f"{BASE_URL}/api/queries/5/results/21.csv", text=self.mock_data) + m.post(f'{BASE_URL}/api/queries/5/refresh', json={ + 'job': {'id': 66, 'status': 1}}) + m.get(f'{BASE_URL}/api/jobs/66', json={ + 'job': {'id': 66, 'status': 1}}) + m.get(f'{BASE_URL}/api/queries/5/results/21.csv', text=self.mock_data) self.redash.pause = 0.01 # shorten pause time self.redash.timeout = 0.01 # timeout raised = False try: - self.redash.get_fresh_query_results(5, {"yyy": "xxx"}) + self.redash.get_fresh_query_results(5, {'yyy': 'xxx'}) except RedashTimeout: raised = True self.assertTrue(raised) @requests_mock.Mocker() def test_to_table(self, m): - m.post( - f"{BASE_URL}/api/queries/5/refresh", - json={"job": {"status": 3, "query_result_id": 21}}, - ) - m.get(f"{BASE_URL}/api/queries/5/results/21.csv", text=self.mock_data) + m.post(f'{BASE_URL}/api/queries/5/refresh', json={ + 'job': {'status': 3, 'query_result_id': 21}}) + m.get(f'{BASE_URL}/api/queries/5/results/21.csv', text=self.mock_data) self.redash.pause = 0.01 # shorten pause time table_data = Redash.load_to_table( base_url=BASE_URL, user_api_key=API_KEY, query_id=5, - params={"x": "y"}, - verify=False, - ) + params={'x': 'y'}, + verify=False) assert_matching_tables(table_data, self.mock_result) @@ -133,19 +89,15 @@ def test_to_table(self, m): def test_to_table_env_vars(self, m): try: _environ = dict(os.environ) - os.environ.update( - { - "REDASH_BASE_URL": BASE_URL, - "REDASH_USER_API_KEY": API_KEY, - "REDASH_QUERY_ID": "5", - "REDASH_QUERY_PARAMS": "p_x=y", - } - ) - m.post( - f"{BASE_URL}/api/queries/5/refresh", - json={"job": {"status": 3, "query_result_id": 21}}, - ) - m.get(f"{BASE_URL}/api/queries/5/results/21.csv", text=self.mock_data) + 
os.environ.update({
+                'REDASH_BASE_URL': BASE_URL,
+                'REDASH_USER_API_KEY': API_KEY,
+                'REDASH_QUERY_ID': "5",
+                'REDASH_QUERY_PARAMS': "p_x=y"
+            })
+            m.post(f'{BASE_URL}/api/queries/5/refresh', json={
+                'job': {'status': 3, 'query_result_id': 21}})
+            m.get(f'{BASE_URL}/api/queries/5/results/21.csv', text=self.mock_data)

             self.redash.pause = 0.01  # shorten pause time

diff --git a/test/test_redshift.py b/test/test_redshift.py
index 9c45fe66d4..8eddc58ebf 100644
--- a/test/test_redshift.py
+++ b/test/test_redshift.py
@@ -1,4 +1,6 @@
-from parsons import Redshift, S3, Table
+from parsons.databases.redshift import Redshift
+from parsons.aws import S3
+from parsons.etl.table import Table
 from test.utils import assert_matching_tables
 import unittest
 import os
@@ -7,32 +9,32 @@
 from testfixtures import LogCapture

 # The name of the schema that will be temporarily created for the tests
-TEMP_SCHEMA = "parsons_test2"
+TEMP_SCHEMA = 'parsons_test2'

 # These tests do not interact with the Redshift Database directly, and don't need real credentials


 class TestRedshift(unittest.TestCase):
+
     def setUp(self):

-        self.rs = Redshift(
-            username="test", password="test", host="test", db="test", port=123
-        )
-
-        self.tbl = Table([["ID", "Name"], [1, "Jim"], [2, "John"], [3, "Sarah"]])
-
-        self.tbl2 = Table(
-            [
-                ["c1", "c2", "c3", "c4", "c5", "c6", "c7"],
-                ["a", "", 1, "NA", 1.4, 1, 2],
-                ["b", "", 2, "NA", 1.4, 1, 2],
-                ["c", "", 3.4, "NA", "", "", "a"],
-                ["d", "", 5, "NA", 1.4, 1, 2],
-                ["e", "", 6, "NA", 1.4, 1, 2],
-                ["f", "", 7.8, "NA", 1.4, 1, 2],
-                ["g", "", 9, "NA", 1.4, 1, 2],
-            ]
-        )
+        self.rs = Redshift(username='test', password='test', host='test', db='test', port=123)
+
+        self.tbl = Table([['ID', 'Name'],
+                          [1, 'Jim'],
+                          [2, 'John'],
+                          [3, 'Sarah']])
+
+        self.tbl2 = Table([
+            ["c1", "c2", "c3", "c4", "c5", "c6", "c7"],
+            ["a", "", 1, "NA", 1.4, 1, 2],
+            ["b", "", 2, "NA", 1.4, 1, 2],
+            ["c", "", 3.4, "NA", "", "", "a"],
+            ["d", "", 5, "NA", 1.4, 1, 2],
+            ["e", "", 6, "NA", 1.4, 1, 2],
+            ["f", "", 7.8, "NA", 1.4, 1, 2],
+            ["g", "", 9, "NA", 1.4, 1, 2],
+            ])

         self.mapping = self.rs.generate_data_types(self.tbl)
         self.rs.DO_PARSE_BOOLS = True
@@ -41,79 +43,75 @@
         self.mapping3 = self.rs.generate_data_types(self.tbl2)

     def test_split_full_table_name(self):
-        schema, table = Redshift.split_full_table_name("some_schema.some_table")
-        self.assertEqual(schema, "some_schema")
-        self.assertEqual(table, "some_table")
+        schema, table = Redshift.split_full_table_name('some_schema.some_table')
+        self.assertEqual(schema, 'some_schema')
+        self.assertEqual(table, 'some_table')

         # When missing the schema
-        schema, table = Redshift.split_full_table_name("some_table")
-        self.assertEqual(schema, "public")
-        self.assertEqual(table, "some_table")
+        schema, table = Redshift.split_full_table_name('some_table')
+        self.assertEqual(schema, 'public')
+        self.assertEqual(table, 'some_table')

         # When there are too many parts
-        self.assertRaises(ValueError, Redshift.split_full_table_name, "a.b.c")
+        self.assertRaises(ValueError, Redshift.split_full_table_name, 'a.b.c')

     def test_combine_schema_and_table_name(self):
-        full_table_name = Redshift.combine_schema_and_table_name(
-            "some_schema", "some_table"
-        )
-        self.assertEqual(full_table_name, "some_schema.some_table")
+        full_table_name = Redshift.combine_schema_and_table_name('some_schema', 'some_table')
+        self.assertEqual(full_table_name, 'some_schema.some_table')

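# A quick worked example of the two helpers tested above, with the behavior
# read directly off the assertions (the table names here are illustrative only):
#
#   Redshift.split_full_table_name('analytics.events')  ->  ('analytics', 'events')
#   Redshift.split_full_table_name('events')             ->  ('public', 'events')   # schema defaults to 'public'
#   Redshift.split_full_table_name('a.b.c')              ->  raises ValueError (too many parts)
#   Redshift.combine_schema_and_table_name('analytics', 'events')  ->  'analytics.events'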
""), "bool") - self.assertEqual(self.rs.data_type(True, ""), "bool") + self.assertEqual(self.rs.data_type(1, ''), 'bool') + self.assertEqual(self.rs.data_type(True, ''), 'bool') self.rs.DO_PARSE_BOOLS = False - self.assertEqual(self.rs.data_type(1, ""), "int") - self.assertEqual(self.rs.data_type(True, ""), "varchar") + self.assertEqual(self.rs.data_type(1, ''), 'int') + self.assertEqual(self.rs.data_type(True, ''), 'varchar') # Test smallint # Currently smallints are coded as ints - self.assertEqual(self.rs.data_type(2, ""), "int") + self.assertEqual(self.rs.data_type(2, ''), 'int') # Test int - self.assertEqual(self.rs.data_type(32769, ""), "int") + self.assertEqual(self.rs.data_type(32769, ''), 'int') # Test bigint - self.assertEqual(self.rs.data_type(2147483648, ""), "bigint") + self.assertEqual(self.rs.data_type(2147483648, ''), 'bigint') # Test varchar that looks like an int - self.assertEqual(self.rs.data_type("00001", ""), "varchar") + self.assertEqual(self.rs.data_type('00001', ''), 'varchar') # Test a float as a float - self.assertEqual(self.rs.data_type(5.001, ""), "float") + self.assertEqual(self.rs.data_type(5.001, ''), 'float') # Test varchar - self.assertEqual(self.rs.data_type("word", ""), "varchar") + self.assertEqual(self.rs.data_type('word', ''), 'varchar') # Test int with underscore - self.assertEqual(self.rs.data_type("1_2", ""), "varchar") + self.assertEqual(self.rs.data_type('1_2', ''), 'varchar') # Test int with leading zero - self.assertEqual(self.rs.data_type("01", ""), "varchar") + self.assertEqual(self.rs.data_type('01', ''), 'varchar') def test_generate_data_types(self): # Test correct header labels - self.assertEqual(self.mapping["headers"], ["ID", "Name"]) + self.assertEqual(self.mapping['headers'], ['ID', 'Name']) # Test correct data types - self.assertEqual(self.mapping["type_list"], ["int", "varchar"]) + self.assertEqual(self.mapping['type_list'], ['int', 'varchar']) self.assertEqual( - self.mapping2["type_list"], - ["varchar", "varchar", "float", "varchar", "float", "bool", "varchar"], - ) + self.mapping2['type_list'], + ['varchar', 'varchar', 'float', 'varchar', 'float', 'bool', 'varchar']) self.assertEqual( - self.mapping3["type_list"], - ["varchar", "varchar", "float", "varchar", "float", "int", "varchar"], - ) + self.mapping3['type_list'], + ['varchar', 'varchar', 'float', 'varchar', 'float', 'int', 'varchar']) # Test correct lengths - self.assertEqual(self.mapping["longest"], [1, 5]) + self.assertEqual(self.mapping['longest'], [1, 5]) def test_vc_padding(self): # Test padding calculated correctly - self.assertEqual(self.rs.vc_padding(self.mapping, 0.2), [1, 6]) + self.assertEqual(self.rs.vc_padding(self.mapping, .2), [1, 6]) def test_vc_max(self): # Test max sets it to the max - self.assertEqual(self.rs.vc_max(self.mapping, ["Name"]), [1, 65535]) + self.assertEqual(self.rs.vc_max(self.mapping, ['Name']), [1, 65535]) # Test raises when can't find column # To Do @@ -121,120 +119,93 @@ def test_vc_max(self): def test_vc_validate(self): # Test that a column with a width of 0 is set to 1 - self.mapping["longest"][0] = 0 + self.mapping['longest'][0] = 0 self.mapping = self.rs.vc_validate(self.mapping) self.assertEqual(self.mapping, [1, 5]) def test_create_sql(self): # Test the the statement is expected - sql = self.rs.create_sql("tmc.test", self.mapping, distkey="ID") - exp_sql = ( - "create table tmc.test (\n id int,\n name varchar(5)) \ndistkey(ID) ;" - ) + sql = self.rs.create_sql('tmc.test', self.mapping, distkey='ID') + exp_sql = "create table 
     def test_compound_sortkey(self):
         # check single sortkey formatting
-        sql = self.rs.create_sql("tmc.test", self.mapping, sortkey="ID")
-        exp_sql = (
-            "create table tmc.test (\n id int,\n name varchar(5)) \nsortkey(ID);"
-        )
+        sql = self.rs.create_sql('tmc.test', self.mapping, sortkey='ID')
+        exp_sql = "create table tmc.test (\n id int,\n name varchar(5)) \nsortkey(ID);"
         self.assertEqual(sql, exp_sql)

         # check compound sortkey formatting
-        sql = self.rs.create_sql("tmc.test", self.mapping, sortkey=["ID1", "ID2"])
+        sql = self.rs.create_sql('tmc.test', self.mapping, sortkey=['ID1', 'ID2'])
         exp_sql = "create table tmc.test (\n id int,\n name varchar(5))"
         exp_sql += " \ncompound sortkey(ID1, ID2);"
         self.assertEqual(sql, exp_sql)

     def test_column_validate(self):

-        bad_cols = [
-            "a",
-            "a",
-            "",
-            "SELECT",
-            "asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaksdfjklasj"
-            "dfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjklasdfjklasjkl"
-            "dfakljsdfjalsdkfjklasjdfklasjdfklasdkljf",
-        ]
+        bad_cols = ['a', 'a', '', 'SELECT', 'asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaksdfjklasj'
+                    'dfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjklasdfjklasjkl'
+                    'dfakljsdfjalsdkfjklasjdfklasjdfklasdkljf']
         fixed_cols = [
-            "a",
-            "a_1",
-            "col_2",
-            "col_3",
-            "asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaks"
-            "dfjklasjdfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjkl",
-        ]
+            'a', 'a_1', 'col_2', 'col_3', 'asdfjkasjdfklasjdfklajskdfljaskldfjaklsdfjlaks'
+            'dfjklasjdfklasjdkfljaskldfljkasjdkfasjlkdfjklasdfjklakjsfasjkdfljaslkdfjkl']
         self.assertEqual(self.rs.column_name_validate(bad_cols), fixed_cols)

     def test_create_statement(self):

         # Assert that the create statement is as expected
-        sql = self.rs.create_statement(self.tbl, "tmc.test", distkey="ID")
+        sql = self.rs.create_statement(self.tbl, 'tmc.test', distkey='ID')
         exp_sql = """create table tmc.test (\n "id" int,\n "name" varchar(5)) \ndistkey(ID) ;"""  # noqa: E501
         self.assertEqual(sql, exp_sql)

         # Assert that an error is raised by an empty table
-        empty_table = Table([["Col_1", "Col_2"]])
-        self.assertRaises(ValueError, self.rs.create_statement, empty_table, "tmc.test")
+        empty_table = Table([['Col_1', 'Col_2']])
+        self.assertRaises(ValueError, self.rs.create_statement, empty_table, 'tmc.test')

     def test_get_creds_kwargs(self):

         # Test passing kwargs
-        creds = self.rs.get_creds("kwarg_key", "kwarg_secret_key")
+        creds = self.rs.get_creds('kwarg_key', 'kwarg_secret_key')
         expected = """credentials 'aws_access_key_id=kwarg_key;aws_secret_access_key=kwarg_secret_key'\n"""  # noqa: E501
         self.assertEqual(creds, expected)

         # Test grabbing from environment variables
-        prior_aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID", "")
-        prior_aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
-        os.environ["AWS_ACCESS_KEY_ID"] = "env_key"
-        os.environ["AWS_SECRET_ACCESS_KEY"] = "env_secret_key"
+        prior_aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID', '')
+        prior_aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY', '')
+        os.environ['AWS_ACCESS_KEY_ID'] = 'env_key'
+        os.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'

         creds = self.rs.get_creds(None, None)
         expected = """credentials 'aws_access_key_id=env_key;aws_secret_access_key=env_secret_key'\n"""  # noqa: E501
         self.assertEqual(creds, expected)

         # Reset env vars
-        os.environ["AWS_ACCESS_KEY_ID"] = prior_aws_access_key_id
-        os.environ["AWS_SECRET_ACCESS_KEY"] = 
prior_aws_secret_access_key + os.environ['AWS_ACCESS_KEY_ID'] = prior_aws_access_key_id + os.environ['AWS_SECRET_ACCESS_KEY'] = prior_aws_secret_access_key def scrub_copy_tokens(self, s): - s = re.sub("=.+;", "=*HIDDEN*;", s) - s = re.sub("aws_secret_access_key=.+'", "aws_secret_access_key=*HIDDEN*'", s) + s = re.sub('=.+;', '=*HIDDEN*;', s) + s = re.sub('aws_secret_access_key=.+\'', + 'aws_secret_access_key=*HIDDEN*\'', s) return s def test_copy_statement_default(self): - sql = self.rs.copy_statement( - "test_schema.test", - "buck", - "file.csv", - aws_access_key_id="abc123", - aws_secret_access_key="abc123", - bucket_region="us-east-2", - ) + sql = self.rs.copy_statement('test_schema.test', 'buck', 'file.csv', + aws_access_key_id='abc123', + aws_secret_access_key='abc123', + bucket_region='us-east-2') # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) - - expected_options = [ - "ignoreheader 1", - "acceptanydate", - "dateformat 'auto'", - "timeformat 'auto'", - "csv delimiter ','", - "copy test_schema.test \nfrom 's3://buck/file.csv'", - "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", - "region 'us-east-2'", - "emptyasnull", - "blanksasnull", - "acceptinvchars", - ] + sql = re.sub(r'id=.+;', '*id=HIDDEN*;', re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) + + expected_options = ['ignoreheader 1', 'acceptanydate', + "dateformat 'auto'", "timeformat 'auto'", "csv delimiter ','", + "copy test_schema.test \nfrom 's3://buck/file.csv'", + "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", + "region 'us-east-2'", 'emptyasnull', 'blanksasnull', + 'acceptinvchars'] # Check that all of the expected options are there: [self.assertNotEqual(sql.find(o), -1, o) for o in expected_options] @@ -242,63 +213,33 @@ def test_copy_statement_default(self): def test_copy_statement_statupdate(self): sql = self.rs.copy_statement( - "test_schema.test", - "buck", - "file.csv", - aws_access_key_id="abc123", - aws_secret_access_key="abc123", - statupdate=True, - ) + 'test_schema.test', 'buck', 'file.csv', + aws_access_key_id='abc123', aws_secret_access_key='abc123', statupdate=True) # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) - - expected_options = [ - "statupdate on", - "ignoreheader 1", - "acceptanydate", - "dateformat 'auto'", - "timeformat 'auto'", - "csv delimiter ','", - "copy test_schema.test \nfrom 's3://buck/file.csv'", - "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", - "emptyasnull", - "blanksasnull", - "acceptinvchars", - ] + sql = re.sub(r'id=.+;', '*id=HIDDEN*;', re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) + + expected_options = ["statupdate on", 'ignoreheader 1', 'acceptanydate', + "dateformat 'auto'", "timeformat 'auto'", "csv delimiter ','", + "copy test_schema.test \nfrom 's3://buck/file.csv'", + "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", + 'emptyasnull', 'blanksasnull', 'acceptinvchars'] # Check that all of the expected options are there: [self.assertNotEqual(sql.find(o), -1) for o in expected_options] sql2 = self.rs.copy_statement( - "test_schema.test", - "buck", - "file.csv", - aws_access_key_id="abc123", - aws_secret_access_key="abc123", - statupdate=False, - ) + 'test_schema.test', 'buck', 'file.csv', + aws_access_key_id='abc123', aws_secret_access_key='abc123', statupdate=False) # Scrub the keys - sql2 = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql2) - ) - - expected_options = [ - "statupdate 
off", - "ignoreheader 1", - "acceptanydate", - "dateformat 'auto'", - "timeformat 'auto'", - "csv delimiter ','", - "copy test_schema.test \nfrom 's3://buck/file.csv'", - "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", - "emptyasnull", - "blanksasnull", - "acceptinvchars", - ] + sql2 = re.sub(r'id=.+;', '*id=HIDDEN*;', re.sub(r"key=.+'", "key=*HIDDEN*'", sql2)) + + expected_options = ["statupdate off", 'ignoreheader 1', 'acceptanydate', + "dateformat 'auto'", "timeformat 'auto'", "csv delimiter ','", + "copy test_schema.test \nfrom 's3://buck/file.csv'", + "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", + 'emptyasnull', 'blanksasnull', 'acceptinvchars'] # Check that all of the expected options are there: [self.assertNotEqual(sql2.find(o), -1) for o in expected_options] @@ -306,116 +247,73 @@ def test_copy_statement_statupdate(self): def test_copy_statement_compupdate(self): sql = self.rs.copy_statement( - "test_schema.test", - "buck", - "file.csv", - aws_access_key_id="abc123", - aws_secret_access_key="abc123", - compupdate=True, - ) + 'test_schema.test', 'buck', 'file.csv', + aws_access_key_id='abc123', aws_secret_access_key='abc123', compupdate=True) # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) - - expected_options = [ - "compupdate on", - "ignoreheader 1", - "acceptanydate", - "dateformat 'auto'", - "timeformat 'auto'", - "csv delimiter ','", - "copy test_schema.test \nfrom 's3://buck/file.csv'", - "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", - "emptyasnull", - "blanksasnull", - "acceptinvchars", - ] + sql = re.sub(r'id=.+;', '*id=HIDDEN*;', re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) + + expected_options = ["compupdate on", 'ignoreheader 1', 'acceptanydate', + "dateformat 'auto'", "timeformat 'auto'", "csv delimiter ','", + "copy test_schema.test \nfrom 's3://buck/file.csv'", + "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", + 'emptyasnull', 'blanksasnull', 'acceptinvchars'] # Check that all of the expected options are there: [self.assertNotEqual(sql.find(o), -1) for o in expected_options] sql2 = self.rs.copy_statement( - "test_schema.test", - "buck", - "file.csv", - aws_access_key_id="abc123", - aws_secret_access_key="abc123", - compupdate=False, - ) + 'test_schema.test', 'buck', 'file.csv', + aws_access_key_id='abc123', aws_secret_access_key='abc123', compupdate=False) # Scrub the keys - sql2 = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql2) - ) - - expected_options = [ - "compupdate off", - "ignoreheader 1", - "acceptanydate", - "dateformat 'auto'", - "timeformat 'auto'", - "csv delimiter ','", - "copy test_schema.test \nfrom 's3://buck/file.csv'", - "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", - "emptyasnull", - "blanksasnull", - "acceptinvchars", - ] + sql2 = re.sub(r'id=.+;', '*id=HIDDEN*;', re.sub(r"key=.+'", "key=*HIDDEN*'", sql2)) + + expected_options = ["compupdate off", 'ignoreheader 1', 'acceptanydate', + "dateformat 'auto'", "timeformat 'auto'", "csv delimiter ','", + "copy test_schema.test \nfrom 's3://buck/file.csv'", + "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", + 'emptyasnull', 'blanksasnull', 'acceptinvchars'] # Check that all of the expected options are there: [self.assertNotEqual(sql2.find(o), -1) for o in expected_options] def test_copy_statement_columns(self): - cols = ["a", "b", "c"] + cols = ['a', 'b', 'c'] sql = self.rs.copy_statement( - "test_schema.test", - "buck", - 
"file.csv", - aws_access_key_id="abc123", - aws_secret_access_key="abc123", - specifycols=cols, - ) + 'test_schema.test', 'buck', 'file.csv', + aws_access_key_id='abc123', aws_secret_access_key='abc123', specifycols=cols) # Scrub the keys - sql = re.sub( - r"id=.+;", "*id=HIDDEN*;", re.sub(r"key=.+'", "key=*HIDDEN*'", sql) - ) - - expected_options = [ - "ignoreheader 1", - "acceptanydate", - "dateformat 'auto'", - "timeformat 'auto'", - "csv delimiter ','", - "copy test_schema.test(a, b, c) \nfrom 's3://buck/file.csv'", - "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", - "emptyasnull", - "blanksasnull", - "acceptinvchars", - ] + sql = re.sub(r'id=.+;', '*id=HIDDEN*;', re.sub(r"key=.+'", "key=*HIDDEN*'", sql)) + + expected_options = ['ignoreheader 1', 'acceptanydate', + "dateformat 'auto'", "timeformat 'auto'", "csv delimiter ','", + "copy test_schema.test(a, b, c) \nfrom 's3://buck/file.csv'", + "'aws_access_key_*id=HIDDEN*;aws_secret_access_key=*HIDDEN*'", + 'emptyasnull', 'blanksasnull', 'acceptinvchars'] # Check that all of the expected options are there: [self.assertNotEqual(sql.find(o), -1) for o in expected_options] - # These tests interact directly with the Redshift database -@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestRedshiftDB(unittest.TestCase): + def setUp(self): self.temp_schema = TEMP_SCHEMA self.rs = Redshift() - self.tbl = Table([["ID", "Name"], [1, "Jim"], [2, "John"], [3, "Sarah"]]) + self.tbl = Table([['ID', 'Name'], + [1, 'Jim'], + [2, 'John'], + [3, 'Sarah']]) # Create a schema, create a table, create a view setup_sql = f""" @@ -436,8 +334,8 @@ def setUp(self): self.s3 = S3() - self.temp_s3_bucket = os.environ["S3_TEMP_BUCKET"] - self.temp_s3_prefix = "test/" + self.temp_s3_bucket = os.environ['S3_TEMP_BUCKET'] + self.temp_s3_prefix = 'test/' def tearDown(self): @@ -454,48 +352,46 @@ def tearDown(self): def test_query(self): # Check that query sending back expected result - r = self.rs.query("select 1") - self.assertEqual(r[0]["?column?"], 1) + r = self.rs.query('select 1') + self.assertEqual(r[0]['?column?'], 1) def test_query_with_parameters(self): table_name = f"{self.temp_schema}.test" - self.tbl.to_redshift(table_name, if_exists="append") + self.tbl.to_redshift(table_name, if_exists='append') sql = f"select * from {table_name} where name = %s" - name = "Sarah" + name = 'Sarah' r = self.rs.query(sql, parameters=[name]) - self.assertEqual(r[0]["name"], name) + self.assertEqual(r[0]['name'], name) sql = f"select * from {table_name} where name in (%s, %s)" - names = ["Sarah", "John"] + names = ['Sarah', 'John'] r = self.rs.query(sql, parameters=names) self.assertEqual(r.num_rows, 2) def test_schema_exists(self): self.assertTrue(self.rs.schema_exists(self.temp_schema)) - self.assertFalse(self.rs.schema_exists("nonsense")) + self.assertFalse(self.rs.schema_exists('nonsense')) def test_table_exists(self): # Check if table_exists finds a table that exists - self.assertTrue(self.rs.table_exists(f"{self.temp_schema}.test")) + self.assertTrue(self.rs.table_exists(f'{self.temp_schema}.test')) # Check if table_exists is case insensitive - self.assertTrue(self.rs.table_exists(f"{self.temp_schema.upper()}.TEST")) + self.assertTrue(self.rs.table_exists(f'{self.temp_schema.upper()}.TEST')) # Check if table_exists doesn't find a table that doesn't exists - 
self.assertFalse(self.rs.table_exists(f"{self.temp_schema}.test_fake"))
+        self.assertFalse(self.rs.table_exists(f'{self.temp_schema}.test_fake'))

         # Check if table_exists finds a view that exists
-        self.assertTrue(self.rs.table_exists(f"{self.temp_schema}.test_view"))
+        self.assertTrue(self.rs.table_exists(f'{self.temp_schema}.test_view'))

         # Check if table_exists doesn't find a view that doesn't exist
-        self.assertFalse(self.rs.table_exists(f"{self.temp_schema}.test_view_fake"))
+        self.assertFalse(self.rs.table_exists(f'{self.temp_schema}.test_view_fake'))

         # Check that the view kwarg works
-        self.assertFalse(
-            self.rs.table_exists(f"{self.temp_schema}.test_view", view=False)
-        )
+        self.assertFalse(self.rs.table_exists(f'{self.temp_schema}.test_view', view=False))

     def test_temp_s3_create(self):

@@ -515,140 +411,106 @@ def test_copy_s3(self):

     def test_copy(self):

         # Copy a table
-        self.rs.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop")
+        self.rs.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop')

         # Test that file exists
-        r = self.rs.query(
-            f"select * from {self.temp_schema}.test_copy where name='Jim'"
-        )
-        self.assertEqual(r[0]["id"], 1)
+        r = self.rs.query(f"select * from {self.temp_schema}.test_copy where name='Jim'")
+        self.assertEqual(r[0]['id'], 1)

         # Copy to the same table, to verify that the "truncate" flag works.
-        self.rs.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="truncate")
+        self.rs.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='truncate')
         rows = self.rs.query(f"select count(*) from {self.temp_schema}.test_copy")
-        self.assertEqual(rows[0]["count"], 3)
+        self.assertEqual(rows[0]['count'], 3)

         # Copy to the same table, to verify that the "drop" flag works.
-        self.rs.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop")
+        self.rs.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop')

         # Verify that a warning message prints when a DIST/SORT key is omitted
         with LogCapture() as lc:
             self.rs.copy(
-                self.tbl,
-                f"{self.temp_schema}.test_copy",
-                if_exists="drop",
-                sortkey="Name",
-            )
-            desired_log = [
-                log for log in lc.records if "optimize your queries" in log.msg
-            ][0]
+                self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop', sortkey='Name')
+            desired_log = [log for log in lc.records if "optimize your queries" in log.msg][0]
             self.assertTrue("DIST" in desired_log.msg)
             self.assertFalse("SORT" in desired_log.msg)

     def test_upsert(self):

         # Create a target table when no target table exists
-        self.rs.upsert(self.tbl, f"{self.temp_schema}.test_copy", "ID")
+        self.rs.upsert(self.tbl, f'{self.temp_schema}.test_copy', 'ID')

         # Run upsert
-        upsert_tbl = Table([["id", "name"], [1, "Jane"], [5, "Bob"]])
-        self.rs.upsert(upsert_tbl, f"{self.temp_schema}.test_copy", "ID")
+        upsert_tbl = Table([['id', 'name'], [1, 'Jane'], [5, 'Bob']])
+        self.rs.upsert(upsert_tbl, f'{self.temp_schema}.test_copy', 'ID')

         # Make sure that it is the expected table
-        expected_tbl = Table(
-            [["id", "name"], [1, "Jane"], [2, "John"], [3, "Sarah"], [5, "Bob"]]
-        )
-        updated_tbl = self.rs.query(
-            f"select * from {self.temp_schema}.test_copy order by id;"
-        )
+        expected_tbl = Table([['id', 'name'], [1, 'Jane'], [2, 'John'], [3, 'Sarah'], [5, 'Bob']])
+        updated_tbl = self.rs.query(f'select * from {self.temp_schema}.test_copy order by id;')
         assert_matching_tables(expected_tbl, updated_tbl)

         # Try to run it with a bad primary key
         self.rs.query(f"INSERT INTO {self.temp_schema}.test_copy VALUES (1, 'Jim')")
         self.assertRaises(
-            ValueError,
-            
self.rs.upsert, - upsert_tbl, - f"{self.temp_schema}.test_copy", - "ID", - ) + ValueError, self.rs.upsert, upsert_tbl, f'{self.temp_schema}.test_copy', 'ID') # Now try and upsert using two primary keys - upsert_tbl = Table([["id", "name"], [1, "Jane"]]) - self.rs.upsert(upsert_tbl, f"{self.temp_schema}.test_copy", ["id", "name"]) + upsert_tbl = Table([['id', 'name'], [1, 'Jane']]) + self.rs.upsert(upsert_tbl, f'{self.temp_schema}.test_copy', ['id', 'name']) # Make sure our table looks like we expect - expected_tbl = Table( - [ - ["id", "name"], - [2, "John"], - [3, "Sarah"], - [5, "Bob"], - [1, "Jim"], - [1, "Jane"], - ] - ) - updated_tbl = self.rs.query( - f"select * from {self.temp_schema}.test_copy order by id;" - ) + expected_tbl = Table([['id', 'name'], + [2, 'John'], [3, 'Sarah'], [5, 'Bob'], [1, 'Jim'], [1, 'Jane']]) + updated_tbl = self.rs.query(f'select * from {self.temp_schema}.test_copy order by id;') assert_matching_tables(expected_tbl, updated_tbl) # Try to run it with a bad primary key self.rs.query(f"INSERT INTO {self.temp_schema}.test_copy VALUES (1, 'Jim')") - self.assertRaises( - ValueError, - self.rs.upsert, - upsert_tbl, - f"{self.temp_schema}.test_copy", - ["ID", "name"], - ) + self.assertRaises(ValueError, self.rs.upsert, upsert_tbl, f'{self.temp_schema}.test_copy', + ['ID', 'name']) - self.rs.query(f"truncate table {self.temp_schema}.test_copy") + self.rs.query(f'truncate table {self.temp_schema}.test_copy') # Run upsert with nonmatching datatypes - upsert_tbl = Table([["id", "name"], [3, 600], [6, 9999]]) - self.rs.upsert(upsert_tbl, f"{self.temp_schema}.test_copy", "ID") + upsert_tbl = Table([['id', 'name'], [3, 600], + [6, 9999]]) + self.rs.upsert(upsert_tbl, f'{self.temp_schema}.test_copy', 'ID') # Make sure our table looks like we expect - expected_tbl = Table([["id", "name"], [3, "600"], [6, "9999"]]) - updated_tbl = self.rs.query( - f"select * from {self.temp_schema}.test_copy order by id;" - ) + expected_tbl = Table([['id', 'name'], + [3, '600'], + [6, '9999']]) + updated_tbl = self.rs.query(f'select * from {self.temp_schema}.test_copy order by id;') assert_matching_tables(expected_tbl, updated_tbl) # Run upsert requiring column resize - upsert_tbl = Table([["id", "name"], [7, "this name is very long"]]) - self.rs.upsert(upsert_tbl, f"{self.temp_schema}.test_copy", "ID") + upsert_tbl = Table([['id', 'name'], [7, 'this name is very long']]) + self.rs.upsert(upsert_tbl, f'{self.temp_schema}.test_copy', 'ID') # Make sure our table looks like we expect - expected_tbl = Table( - [["id", "name"], [3, "600"], [6, "9999"], [7, "this name is very long"]] - ) - updated_tbl = self.rs.query( - f"select * from {self.temp_schema}.test_copy order by id;" - ) + expected_tbl = Table([['id', 'name'], + [3, '600'], + [6, '9999'], + [7, 'this name is very long']]) + updated_tbl = self.rs.query(f'select * from {self.temp_schema}.test_copy order by id;') assert_matching_tables(expected_tbl, updated_tbl) def test_unload(self): # Copy a table to Redshift - self.rs.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop") + self.rs.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop') # Unload a table to S3 - self.rs.unload( - f"select * from {self.temp_schema}.test_copy", - self.temp_s3_bucket, - "unload_test", - ) + self.rs.unload(f'select * from {self.temp_schema}.test_copy', + self.temp_s3_bucket, + 'unload_test') # Check that files are there - self.assertTrue(self.s3.key_exists(self.temp_s3_bucket, "unload_test")) + 
self.assertTrue(self.s3.key_exists(self.temp_s3_bucket, 'unload_test'))

     def test_to_from_redshift(self):

         # Test the parsons table methods
-        table_name = f"{self.temp_schema}.test_copy"
-        self.tbl.to_redshift(table_name, if_exists="drop")
+        table_name = f'{self.temp_schema}.test_copy'
+        self.tbl.to_redshift(table_name, if_exists='drop')
         sql = f"SELECT * FROM {table_name} ORDER BY id"
         result_tbl = Table.from_redshift(sql)
         # Don't bother checking column names, since those were tweaked en route to Redshift.
@@ -657,70 +519,48 @@
     def test_generate_manifest(self):

         # Add some tables to buckets
-        self.tbl.to_s3_csv(
-            self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_01.csv"
-        )
-        self.tbl.to_s3_csv(
-            self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_02.csv"
-        )
-        self.tbl.to_s3_csv(
-            self.temp_s3_bucket, f"{self.temp_s3_prefix}test_file_03.csv"
-        )
-        self.tbl.to_s3_csv(
-            self.temp_s3_bucket, f"{self.temp_s3_prefix}dont_include.csv"
-        )
+        self.tbl.to_s3_csv(self.temp_s3_bucket, f'{self.temp_s3_prefix}test_file_01.csv')
+        self.tbl.to_s3_csv(self.temp_s3_bucket, f'{self.temp_s3_prefix}test_file_02.csv')
+        self.tbl.to_s3_csv(self.temp_s3_bucket, f'{self.temp_s3_prefix}test_file_03.csv')
+        self.tbl.to_s3_csv(self.temp_s3_bucket, f'{self.temp_s3_prefix}dont_include.csv')

         # Copy in a table to generate the headers and table
-        self.rs.copy(self.tbl, f"{self.temp_schema}.test_copy", if_exists="drop")
+        self.rs.copy(self.tbl, f'{self.temp_schema}.test_copy', if_exists='drop')

         # Generate the manifest
-        manifest_key = f"{self.temp_s3_prefix}test_manifest.json"
-        manifest = self.rs.generate_manifest(
-            self.temp_s3_bucket,
-            prefix=f"{self.temp_s3_prefix}test_file",
-            manifest_bucket=self.temp_s3_bucket,
-            manifest_key=manifest_key,
-        )
+        manifest_key = f'{self.temp_s3_prefix}test_manifest.json'
+        manifest = self.rs.generate_manifest(self.temp_s3_bucket,
+                                             prefix=f'{self.temp_s3_prefix}test_file',
+                                             manifest_bucket=self.temp_s3_bucket,
+                                             manifest_key=manifest_key)

         # Validate path formatted correctly
-        valid_url = f"s3://{self.temp_s3_bucket}/{self.temp_s3_prefix}test_file_01.csv"
-        self.assertEqual(manifest["entries"][0]["url"], valid_url)
+        valid_url = f's3://{self.temp_s3_bucket}/{self.temp_s3_prefix}test_file_01.csv'
+        self.assertEqual(manifest['entries'][0]['url'], valid_url)

         # Validate that there are three files
-        self.assertEqual(len(manifest["entries"]), 3)
+        self.assertEqual(len(manifest['entries']), 3)

         # Validate that manifest saved to bucket
-        keys = self.s3.list_keys(
-            self.temp_s3_bucket, prefix=f"{self.temp_s3_prefix}test_manifest"
-        )
+        keys = self.s3.list_keys(self.temp_s3_bucket, prefix=f'{self.temp_s3_prefix}test_manifest')
         self.assertTrue(manifest_key in keys)

     def test_move_table(self):

         # Run the method and check that the new table was created
-        self.rs.move_table(f"{self.temp_schema}.test", f"{self.temp_schema}.test2")
-        self.assertTrue(self.rs.table_exists(f"{self.temp_schema}.test2"))
+        self.rs.move_table(f'{self.temp_schema}.test', f'{self.temp_schema}.test2')
+        self.assertTrue(self.rs.table_exists(f'{self.temp_schema}.test2'))

         # Run the method again, but drop original
         self.rs.move_table(
-            f"{self.temp_schema}.test2",
-            f"{self.temp_schema}.test3",
-            drop_source_table=True,
-        )
-        self.assertFalse(self.rs.table_exists(f"{self.temp_schema}.test2"))
+            f'{self.temp_schema}.test2', f'{self.temp_schema}.test3', drop_source_table=True)
+        self.assertFalse(self.rs.table_exists(f'{self.temp_schema}.test2'))

     def test_get_tables(self):

         tbls_list = 
self.rs.get_tables(schema=self.temp_schema) - exp = [ - "schemaname", - "tablename", - "tableowner", - "tablespace", - "hasindexes", - "hasrules", - "hastriggers", - ] + exp = ['schemaname', 'tablename', 'tableowner', 'tablespace', 'hasindexes', + 'hasrules', 'hastriggers'] self.assertTrue(validate_list(exp, tbls_list)) @@ -729,28 +569,10 @@ def test_get_table_stats(self): tbls_list = self.rs.get_table_stats(schema=self.temp_schema) exp = [ - "database", - "schema", - "table_id", - "table", - "encoded", - "diststyle", - "sortkey1", - "max_varchar", - "sortkey1_enc", - "sortkey_num", - "size", - "pct_used", - "empty", - "unsorted", - "stats_off", - "tbl_rows", - "skew_sortkey1", - "skew_rows", - "estimated_visible_rows", - "risk_event", - "vacuum_sort_benefit", - ] + 'database', 'schema', 'table_id', 'table', 'encoded', 'diststyle', 'sortkey1', + 'max_varchar', 'sortkey1_enc', 'sortkey_num', 'size', 'pct_used', 'empty', + 'unsorted', 'stats_off', 'tbl_rows', 'skew_sortkey1', 'skew_rows', + 'estimated_visible_rows', 'risk_event', 'vacuum_sort_benefit'] # Having some issues testing that the filter is working correctly, as it # takes a little bit of time for a table to show in this table and is beating @@ -763,193 +585,138 @@ def test_get_views(self): # Assert that it works with schema filter views = self.rs.get_views(schema=self.temp_schema) - expected_row = ( - self.temp_schema, - "test_view", - f"SELECT test.id, test.name FROM {self.temp_schema}.test;", - ) + expected_row = (self.temp_schema, + 'test_view', + f'SELECT test.id, test.name FROM {self.temp_schema}.test;') self.assertEqual(views.data[0], expected_row) def test_get_queries(self): # Validate that columns match expected columns queries_list = self.rs.get_queries() - exp = [ - "user", - "pid", - "xid", - "query", - "service_class", - "slot", - "start", - "state", - "queue_sec", - "exec_sec", - "cpu_sec", - "read_mb", - "spill_mb", - "return_rows", - "nl_rows", - "sql", - "alert", - ] + exp = ['user', 'pid', 'xid', 'query', 'service_class', 'slot', 'start', 'state', + 'queue_sec', 'exec_sec', 'cpu_sec', 'read_mb', 'spill_mb', 'return_rows', + 'nl_rows', 'sql', 'alert'] self.assertTrue(validate_list(exp, queries_list)) def test_get_row_count(self): - table_name = f"{self.temp_schema}.test_row_count" - self.rs.copy(self.tbl, table_name, if_exists="drop") + table_name = f'{self.temp_schema}.test_row_count' + self.rs.copy(self.tbl, table_name, if_exists='drop') count = self.rs.get_row_count(table_name) self.assertEqual(count, 3) def test_rename_table(self): - self.rs.rename_table(self.temp_schema + ".test", "test2") + self.rs.rename_table(self.temp_schema + '.test', 'test2') # Test that renamed table exists - self.assertTrue(self.rs.table_exists(self.temp_schema + ".test2")) + self.assertTrue(self.rs.table_exists(self.temp_schema + '.test2')) # Test that old table name does not exist - self.assertFalse(self.rs.table_exists(self.temp_schema + ".test")) + self.assertFalse(self.rs.table_exists(self.temp_schema + '.test')) def test_union_tables(self): # Copy in two tables - self.rs.copy(self.tbl, f"{self.temp_schema}.union_base1", if_exists="drop") - self.rs.copy(self.tbl, f"{self.temp_schema}.union_base2", if_exists="drop") + self.rs.copy(self.tbl, f'{self.temp_schema}.union_base1', if_exists='drop') + self.rs.copy(self.tbl, f'{self.temp_schema}.union_base2', if_exists='drop') # Union all the two tables and check row count - self.rs.union_tables( - f"{self.temp_schema}.union_all", - [f"{self.temp_schema}.union_base1", 
f"{self.temp_schema}.union_base2"], - ) - self.assertEqual( - self.rs.query(f"select * from {self.temp_schema}.union_all").num_rows, 6 - ) + self.rs.union_tables(f'{self.temp_schema}.union_all', + [f'{self.temp_schema}.union_base1', f'{self.temp_schema}.union_base2']) + self.assertEqual(self.rs.query(f"select * from {self.temp_schema}.union_all").num_rows, 6) # Union the two tables and check row count - self.rs.union_tables( - f"{self.temp_schema}.union_test", - [f"{self.temp_schema}.union_base1", f"{self.temp_schema}.union_base2"], - union_all=False, - ) - self.assertEqual( - self.rs.query(f"select * from {self.temp_schema}.union_test").num_rows, 3 - ) + self.rs.union_tables(f'{self.temp_schema}.union_test', + [f'{self.temp_schema}.union_base1', f'{self.temp_schema}.union_base2'], + union_all=False) + self.assertEqual(self.rs.query(f"select * from {self.temp_schema}.union_test").num_rows, 3) def test_populate_table_from_query(self): # Populate the source table - source_table = f"{self.temp_schema}.test_source" - self.rs.copy(self.tbl, source_table, if_exists="drop") + source_table = f'{self.temp_schema}.test_source' + self.rs.copy(self.tbl, source_table, if_exists='drop') query = f"SELECT * FROM {source_table}" # Populate the table - dest_table = f"{self.temp_schema}.test_dest" + dest_table = f'{self.temp_schema}.test_dest' self.rs.populate_table_from_query(query, dest_table) # Verify rows = self.rs.query(f"select count(*) from {dest_table}") - self.assertEqual(rows[0]["count"], 3) + self.assertEqual(rows[0]['count'], 3) # Try with if_exists='truncate' - self.rs.populate_table_from_query(query, dest_table, if_exists="truncate") + self.rs.populate_table_from_query(query, dest_table, if_exists='truncate') rows = self.rs.query(f"select count(*) from {dest_table}") - self.assertEqual(rows[0]["count"], 3) + self.assertEqual(rows[0]['count'], 3) # Try with if_exists='drop', and a distkey - self.rs.populate_table_from_query( - query, dest_table, if_exists="drop", distkey="id" - ) + self.rs.populate_table_from_query(query, dest_table, if_exists='drop', distkey='id') rows = self.rs.query(f"select count(*) from {dest_table}") - self.assertEqual(rows[0]["count"], 3) + self.assertEqual(rows[0]['count'], 3) # Try with if_exists='fail' self.assertRaises( - ValueError, - self.rs.populate_table_from_query, - query, - dest_table, - if_exists="fail", - ) + ValueError, self.rs.populate_table_from_query, query, dest_table, if_exists='fail') def test_duplicate_table(self): # Populate the source table - source_table = f"{self.temp_schema}.test_source" - self.rs.copy(self.tbl, source_table, if_exists="drop") + source_table = f'{self.temp_schema}.test_source' + self.rs.copy(self.tbl, source_table, if_exists='drop') # Duplicate the table - dest_table = f"{self.temp_schema}.test_dest" + dest_table = f'{self.temp_schema}.test_dest' self.rs.duplicate_table(source_table, dest_table) # Verify rows = self.rs.query(f"select count(*) from {dest_table}") - self.assertEqual(rows[0]["count"], 3) + self.assertEqual(rows[0]['count'], 3) # Try with if_exists='truncate' - self.rs.duplicate_table(source_table, dest_table, if_exists="truncate") + self.rs.duplicate_table(source_table, dest_table, if_exists='truncate') rows = self.rs.query(f"select count(*) from {dest_table}") - self.assertEqual(rows[0]["count"], 3) + self.assertEqual(rows[0]['count'], 3) # Try with if_exists='drop' - self.rs.duplicate_table(source_table, dest_table, if_exists="drop") + self.rs.duplicate_table(source_table, dest_table, if_exists='drop') rows = 
self.rs.query(f"select count(*) from {dest_table}") - self.assertEqual(rows[0]["count"], 3) + self.assertEqual(rows[0]['count'], 3) # Try with if_exists='append' - self.rs.duplicate_table(source_table, dest_table, if_exists="append") + self.rs.duplicate_table(source_table, dest_table, if_exists='append') rows = self.rs.query(f"select count(*) from {dest_table}") - self.assertEqual(rows[0]["count"], 6) + self.assertEqual(rows[0]['count'], 6) # Try with if_exists='fail' - self.assertRaises( - ValueError, - self.rs.duplicate_table, - source_table, - dest_table, - if_exists="fail", - ) + self.assertRaises(ValueError, self.rs.duplicate_table, source_table, + dest_table, if_exists='fail') # Try with invalid if_exists arg - self.assertRaises( - ValueError, - self.rs.duplicate_table, - source_table, - dest_table, - if_exists="nonsense", - ) + self.assertRaises(ValueError, self.rs.duplicate_table, source_table, + dest_table, if_exists='nonsense') def test_get_max_value(self): - date_tbl = Table( - [["id", "date_modified"], [1, "2020-01-01"], [2, "1900-01-01"]] - ) - self.rs.copy(date_tbl, f"{self.temp_schema}.test_date") + date_tbl = Table([['id', 'date_modified'], [1, '2020-01-01'], [2, '1900-01-01']]) + self.rs.copy(date_tbl, f'{self.temp_schema}.test_date') # Test return string - self.assertEqual( - self.rs.get_max_value(f"{self.temp_schema}.test_date", "date_modified"), - "2020-01-01", - ) + self.assertEqual(self.rs.get_max_value(f'{self.temp_schema}.test_date', 'date_modified'), + '2020-01-01') def test_get_columns(self): - cols = self.rs.get_columns(self.temp_schema, "test") + cols = self.rs.get_columns(self.temp_schema, 'test') # id int,name varchar(5) expected_cols = { - "id": { - "data_type": "int", - "max_length": None, - "max_precision": 32, - "max_scale": 0, - "is_nullable": True, - }, - "name": { - "data_type": "character varying", - "max_length": 5, - "max_precision": None, - "max_scale": None, - "is_nullable": True, - }, + 'id': { + 'data_type': 'int', 'max_length': None, + 'max_precision': 32, 'max_scale': 0, 'is_nullable': True}, + 'name': { + 'data_type': 'character varying', 'max_length': 5, + 'max_precision': None, 'max_scale': None, 'is_nullable': True}, } self.assertEqual(cols, expected_cols) @@ -987,109 +754,100 @@ def test_is_table(self): def test_get_table_definition(self): expected_table_def = ( - "--DROP TABLE pg_catalog.pg_amop;" - "\nCREATE TABLE IF NOT EXISTS pg_catalog.pg_amop" - "\n(" - "\n\tamopclaid OID NOT NULL ENCODE RAW" - "\n\t,amopsubtype OID NOT NULL ENCODE RAW" - "\n\t,amopstrategy SMALLINT NOT NULL ENCODE RAW" - "\n\t,amopreqcheck BOOLEAN NOT NULL ENCODE RAW" - "\n\t,amopopr OID NOT NULL ENCODE RAW" - "\n)\nDISTSTYLE EVEN\n;" - ) + '--DROP TABLE pg_catalog.pg_amop;' + '\nCREATE TABLE IF NOT EXISTS pg_catalog.pg_amop' + '\n(' + '\n\tamopclaid OID NOT NULL ENCODE RAW' + '\n\t,amopsubtype OID NOT NULL ENCODE RAW' + '\n\t,amopstrategy SMALLINT NOT NULL ENCODE RAW' + '\n\t,amopreqcheck BOOLEAN NOT NULL ENCODE RAW' + '\n\t,amopopr OID NOT NULL ENCODE RAW' + '\n)\nDISTSTYLE EVEN\n;') actual_table_def = self.rs.get_table_definition("pg_catalog.pg_amop") self.assertEqual(expected_table_def, actual_table_def) def test_get_table_definitions(self): - expected_table_defs = [ - { - "tablename": "pg_catalog.pg_amop", - "ddl": "--DROP TABLE pg_catalog.pg_amop;" - "\nCREATE TABLE IF NOT EXISTS pg_catalog.pg_amop" - "\n(" - "\n\tamopclaid OID NOT NULL ENCODE RAW" - "\n\t,amopsubtype OID NOT NULL ENCODE RAW" - "\n\t,amopstrategy SMALLINT NOT NULL ENCODE RAW" - 
"\n\t,amopreqcheck BOOLEAN NOT NULL ENCODE RAW" - "\n\t,amopopr OID NOT NULL ENCODE RAW" - "\n)\nDISTSTYLE EVEN\n;", - }, - { - "tablename": "pg_catalog.pg_amproc", - "ddl": "--DROP TABLE pg_catalog.pg_amproc;" - "\nCREATE TABLE IF NOT EXISTS pg_catalog.pg_amproc" - "\n(" - "\n\tamopclaid OID NOT NULL ENCODE RAW" - "\n\t,amprocsubtype OID NOT NULL ENCODE RAW" - "\n\t,amprocnum SMALLINT NOT NULL ENCODE RAW" - "\n\t,amproc REGPROC NOT NULL ENCODE RAW" - "\n)" - "\nDISTSTYLE EVEN\n;", - }, - ] + expected_table_defs = [{ + 'tablename': 'pg_catalog.pg_amop', + 'ddl': '--DROP TABLE pg_catalog.pg_amop;' + '\nCREATE TABLE IF NOT EXISTS pg_catalog.pg_amop' + '\n(' + '\n\tamopclaid OID NOT NULL ENCODE RAW' + '\n\t,amopsubtype OID NOT NULL ENCODE RAW' + '\n\t,amopstrategy SMALLINT NOT NULL ENCODE RAW' + '\n\t,amopreqcheck BOOLEAN NOT NULL ENCODE RAW' + '\n\t,amopopr OID NOT NULL ENCODE RAW' + '\n)\nDISTSTYLE EVEN\n;'}, { + 'tablename': 'pg_catalog.pg_amproc', + 'ddl': '--DROP TABLE pg_catalog.pg_amproc;' + '\nCREATE TABLE IF NOT EXISTS pg_catalog.pg_amproc' + '\n(' + '\n\tamopclaid OID NOT NULL ENCODE RAW' + '\n\t,amprocsubtype OID NOT NULL ENCODE RAW' + '\n\t,amprocnum SMALLINT NOT NULL ENCODE RAW' + '\n\t,amproc REGPROC NOT NULL ENCODE RAW' + '\n)' + '\nDISTSTYLE EVEN\n;'}] actual_table_defs = self.rs.get_table_definitions(table="pg_am%p%") self.assertEqual(expected_table_defs, actual_table_defs) def test_get_view_definition(self): expected_view_def = ( - "--DROP VIEW pg_catalog.pg_views;" - "\nCREATE OR REPLACE VIEW pg_catalog.pg_views AS" - "\n SELECT n.nspname AS schemaname" - ", c.relname AS viewname" - ", pg_get_userbyid(c.relowner) AS viewowner" - ", pg_get_viewdef(c.oid) AS definition" - "\n FROM pg_class c" - "\n LEFT JOIN pg_namespace n ON n.oid = c.relnamespace" - "\n WHERE c.relkind = 'v'::\"char\";" - ) + '--DROP VIEW pg_catalog.pg_views;' + '\nCREATE OR REPLACE VIEW pg_catalog.pg_views AS' + '\n SELECT n.nspname AS schemaname' + ', c.relname AS viewname' + ', pg_get_userbyid(c.relowner) AS viewowner' + ', pg_get_viewdef(c.oid) AS definition' + '\n FROM pg_class c' + '\n LEFT JOIN pg_namespace n ON n.oid = c.relnamespace' + '\n WHERE c.relkind = \'v\'::"char";') actual_view_def = self.rs.get_view_definition("pg_catalog.pg_views") self.assertEqual(expected_view_def, actual_view_def) def test_get_view_definitions(self): - expected_view_defs = [ - { - "viewname": "pg_catalog.pg_class_info", - "ddl": "--DROP VIEW pg_catalog.pg_class_info;" - "\nCREATE OR REPLACE VIEW pg_catalog.pg_class_info AS" - "\n SELECT pgc.oid AS reloid, pgc.relname, pgc.relnamespace, " - "pgc.reltype, pgc.relowner, pgc.relam, pgc.relfilenode, " - "pgc.reltablespace, pgc.relpages, pgc.reltuples, " - "pgc.reltoastrelid, pgc.reltoastidxid, pgc.relhasindex, " - "pgc.relisshared, pgc.relkind, pgc.relnatts, pgc.relexternid, " - "pgc.relisreplicated, pgc.relispinned, pgc.reldiststyle, " - "pgc.relprojbaseid, pgc.relchecks, pgc.reltriggers, pgc.relukeys, " - "pgc.relfkeys, pgc.relrefs, pgc.relhasoids, pgc.relhaspkey, " - "pgc.relhasrules, pgc.relhassubclass, pgc.relacl, " - "pgce0.value::smallint AS releffectivediststyle, " - "date_add('microsecond'::text, pgce1.value::bigint, " - "'2000-01-01 00:00:00'::timestamp without time zone) AS " - "relcreationtime" - "\n FROM pg_class pgc" - "\n LEFT JOIN pg_class_extended pgce0 " - "ON pgc.oid = pgce0.reloid AND pgce0.colnum = 0" - "\n LEFT JOIN pg_class_extended pgce1 " - "ON pgc.oid = pgce1.reloid AND pgce1.colnum = 1;", - } - ] + expected_view_defs = [{ + 'viewname': 
'pg_catalog.pg_class_info', + 'ddl': "--DROP VIEW pg_catalog.pg_class_info;" + "\nCREATE OR REPLACE VIEW pg_catalog.pg_class_info AS" + "\n SELECT pgc.oid AS reloid, pgc.relname, pgc.relnamespace, " + "pgc.reltype, pgc.relowner, pgc.relam, pgc.relfilenode, " + "pgc.reltablespace, pgc.relpages, pgc.reltuples, " + "pgc.reltoastrelid, pgc.reltoastidxid, pgc.relhasindex, " + "pgc.relisshared, pgc.relkind, pgc.relnatts, pgc.relexternid, " + "pgc.relisreplicated, pgc.relispinned, pgc.reldiststyle, " + "pgc.relprojbaseid, pgc.relchecks, pgc.reltriggers, pgc.relukeys, " + "pgc.relfkeys, pgc.relrefs, pgc.relhasoids, pgc.relhaspkey, " + "pgc.relhasrules, pgc.relhassubclass, pgc.relacl, " + "pgce0.value::smallint AS releffectivediststyle, " + "date_add('microsecond'::text, pgce1.value::bigint, " + "'2000-01-01 00:00:00'::timestamp without time zone) AS " + "relcreationtime" + "\n FROM pg_class pgc" + "\n LEFT JOIN pg_class_extended pgce0 " + "ON pgc.oid = pgce0.reloid AND pgce0.colnum = 0" + "\n LEFT JOIN pg_class_extended pgce1 " + "ON pgc.oid = pgce1.reloid AND pgce1.colnum = 1;"}] actual_view_def = self.rs.get_view_definitions(view="pg_c%") self.assertEqual(expected_view_defs, actual_view_def) def test_alter_varchar_column_widths(self): - append_tbl = Table([["ID", "Name"], [4, "Jim"], [5, "John"], [6, "Joanna"]]) + append_tbl = Table([['ID', 'Name'], + [4, 'Jim'], + [5, 'John'], + [6, 'Joanna']]) # You can't alter column types if the table has a dependent view self.rs.query(f"DROP VIEW {self.temp_schema}.test_view") # Base table 'Name' column has a width of 5. This should expand it to 6. - self.rs.alter_varchar_column_widths(append_tbl, f"{self.temp_schema}.test") - self.assertEqual( - self.rs.get_columns(self.temp_schema, "test")["name"]["max_length"], 6 - ) + self.rs.alter_varchar_column_widths(append_tbl, f'{self.temp_schema}.test') + self.assertEqual(self.rs.get_columns(self.temp_schema, 'test')['name']['max_length'], 6) if __name__ == "__main__": diff --git a/test/test_rockthevote/test_rtv.py b/test/test_rockthevote/test_rtv.py index 7c229d56fd..9c5f1172cd 100644 --- a/test/test_rockthevote/test_rtv.py +++ b/test/test_rockthevote/test_rtv.py @@ -12,13 +12,11 @@ class TestRockTheVote(unittest.TestCase): @requests_mock.Mocker() def test_create_registration_report(self, mocker): - report_id = "123" - partner_id = "1" - partner_api_key = "abcd" - mocker.post( - "https://register.rockthevote.com/api/v4/registrant_reports.json", - json={"report_id": report_id}, - ) + report_id = '123' + partner_id = '1' + partner_api_key = 'abcd' + mocker.post('https://register.rockthevote.com/api/v4/registrant_reports.json', + json={'report_id': report_id}) rtv = RockTheVote(partner_id=partner_id, partner_api_key=partner_api_key) @@ -27,67 +25,55 @@ def test_create_registration_report(self, mocker): @requests_mock.Mocker() def test_get_registration_report(self, mocker): - partner_id = "1" - partner_api_key = "abcd" - mocker.get( - "https://register.rockthevote.com/api/v4/registrant_reports/1", - json={"download_url": "https://register.rockthevote.com/download/whatever"}, - ) - mocker.get( - "https://register.rockthevote.com/download/whatever", - text=open(f"{_dir}/sample.csv").read(), - ) + partner_id = '1' + partner_api_key = 'abcd' + mocker.get('https://register.rockthevote.com/api/v4/registrant_reports/1', + json={'download_url': 'https://register.rockthevote.com/download/whatever'}) + mocker.get('https://register.rockthevote.com/download/whatever', + text=open(f'{_dir}/sample.csv').read()) rtv = 
RockTheVote(partner_id=partner_id, partner_api_key=partner_api_key) result = rtv.get_registration_report(report_id=1) self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["first_name"], "Carol") - self.assertEqual(result[0]["last_name"], "King") + self.assertEqual(result[0]['first_name'], 'Carol') + self.assertEqual(result[0]['last_name'], 'King') @requests_mock.Mocker() def test_run_registration_report(self, mocker): - report_id = "123" - partner_id = "1" - partner_api_key = "abcd" - mocker.post( - "https://register.rockthevote.com/api/v4/registrant_reports.json", - json={"report_id": report_id}, - ) - mocker.get( - "https://register.rockthevote.com/api/v4/registrant_reports/123", - json={"download_url": "https://register.rockthevote.com/download/whatever"}, - ) - mocker.get( - "https://register.rockthevote.com/download/whatever", - text=open(f"{_dir}/sample.csv").read(), - ) + report_id = '123' + partner_id = '1' + partner_api_key = 'abcd' + mocker.post('https://register.rockthevote.com/api/v4/registrant_reports.json', + json={'report_id': report_id}) + mocker.get('https://register.rockthevote.com/api/v4/registrant_reports/123', + json={'download_url': 'https://register.rockthevote.com/download/whatever'}) + mocker.get('https://register.rockthevote.com/download/whatever', + text=open(f'{_dir}/sample.csv').read()) rtv = RockTheVote(partner_id=partner_id, partner_api_key=partner_api_key) result = rtv.run_registration_report() self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["first_name"], "Carol") - self.assertEqual(result[0]["last_name"], "King") + self.assertEqual(result[0]['first_name'], 'Carol') + self.assertEqual(result[0]['last_name'], 'King') @requests_mock.Mocker() def test_get_state_requirements(self, mocker): - partner_id = "1" - partner_api_key = "abcd" + partner_id = '1' + partner_api_key = 'abcd' - with open(f"{_dir}/sample.json", "r") as j: + with open(f'{_dir}/sample.json', 'r') as j: expected_json = json.load(j) - mocker.get( - "https://register.rockthevote.com/api/v4/state_requirements.json", - json=expected_json, - ) + mocker.get('https://register.rockthevote.com/api/v4/state_requirements.json', + json=expected_json) rtv = RockTheVote(partner_id=partner_id, partner_api_key=partner_api_key) - result = rtv.get_state_requirements("en", "fl", "33314") + result = rtv.get_state_requirements('en', 'fl', '33314') print(result.columns) self.assertEqual(result.num_rows, 1) - self.assertEqual(result[0]["requires_party"], True) - self.assertEqual(result[0]["requires_race"], True) + self.assertEqual(result[0]['requires_party'], True) + self.assertEqual(result[0]['requires_race'], True) diff --git a/test/test_s3.py b/test/test_s3.py index f64943dd26..794f410ae5 100644 --- a/test/test_s3.py +++ b/test/test_s3.py @@ -2,7 +2,8 @@ import os from datetime import datetime import pytz -from parsons import S3, Table +from parsons.aws.s3 import S3 +from parsons.etl.table import Table import urllib import time from test.utils import assert_matching_tables @@ -11,10 +12,9 @@ # to run properly. 
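The test_rtv.py hunks above walk the RockTheVote report lifecycle: a POST to registrant_reports.json creates a report, a GET on the report record returns its download_url, and the CSV at that URL becomes a Parsons Table. A minimal live-usage sketch of that flow, assuming the top-level parsons import works as for the other connectors; the credentials are placeholders copied from the mocks:

    from parsons import RockTheVote  # assumed top-level import

    rtv = RockTheVote(partner_id='1', partner_api_key='abcd')  # placeholder credentials

    # One call: create a registrant report, wait for the download_url, return a Table
    registrants = rtv.run_registration_report()

    # Or fetch an already-created report by id
    registrants = rtv.get_registration_report(report_id=1)
    print(registrants.num_rows, registrants[0]['first_name'])

    # State registration requirements for a language, state, and zip code
    reqs = rtv.get_state_requirements('en', 'fl', '33314')
    print(reqs[0]['requires_party'])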
-@unittest.skipIf( - not os.environ.get("LIVE_TEST"), "Skipping because not running live test" -) +@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test') class TestS3(unittest.TestCase): + def setUp(self): self.s3 = S3() @@ -22,16 +22,16 @@ def setUp(self): self.s3.aws.session.get_credentials() # Create a new bucket - self.test_bucket = os.environ["S3_TEMP_BUCKET"] + self.test_bucket = os.environ['S3_TEMP_BUCKET'] # Trying to avoid random errors on not finding buckets self.s3.create_bucket(self.test_bucket) - self.test_key = "test.csv" - self.tbl = Table([{"first": "Bob", "last": "Smith"}]) + self.test_key = 'test.csv' + self.tbl = Table([{'first': 'Bob', 'last': 'Smith'}]) csv_path = self.tbl.to_csv() - self.test_key_2 = "test2.csv" - self.tbl_2 = Table([{"first": "Jack", "last": "Smith"}]) + self.test_key_2 = 'test2.csv' + self.tbl_2 = Table([{'first': 'Jack', 'last': 'Smith'}]) csv_path_2 = self.tbl_2.to_csv() # Sometimes it runs into issues putting the file @@ -44,7 +44,7 @@ def setUp(self): self.s3.put_file(self.test_bucket, self.test_key_2, csv_path_2) break except Exception: - print("Retrying putting file in bucket...") + print('Retrying putting file in bucket...') retry += 1 def tearDown(self): @@ -64,53 +64,53 @@ def test_bucket_exists(self): self.assertTrue(self.s3.bucket_exists(self.test_bucket)) # Test that a bucket that doesn't exist returns False - self.assertFalse(self.s3.bucket_exists("idontexist_bucket")) + self.assertFalse(self.s3.bucket_exists('idontexist_bucket')) def test_list_keys(self): # Put a file in the bucket csv_path = self.tbl.to_csv() - key = "test/test.csv" + key = 'test/test.csv' self.s3.put_file(self.test_bucket, key, csv_path) # Test that basic bucket list works - keys = self.s3.list_keys(self.test_bucket, prefix="test/test") + keys = self.s3.list_keys(self.test_bucket, prefix='test/test') self.assertTrue(key in keys) # Test that prefix filter works -- when there - keys = self.s3.list_keys(self.test_bucket, prefix="test") + keys = self.s3.list_keys(self.test_bucket, prefix='test') self.assertTrue(key in keys) # Test that prefix filter works -- when not there - keys = self.s3.list_keys(self.test_bucket, prefix="nope") + keys = self.s3.list_keys(self.test_bucket, prefix='nope') self.assertFalse(key in keys) def test_key_exists(self): csv_path = self.tbl.to_csv() - key = "test/test.csv" + key = 'test/test.csv' self.s3.put_file(self.test_bucket, key, csv_path) # Test that returns True if key exists self.assertTrue(self.s3.key_exists(self.test_bucket, key)) # Test that returns False if key does not exist - self.assertFalse(self.s3.key_exists(self.test_bucket, "akey")) + self.assertFalse(self.s3.key_exists(self.test_bucket, 'akey')) def test_list_keys_suffix(self): # Put a file in the bucket csv_path = self.tbl.to_csv() - key_1 = "test/test.csv" - key_2 = "test/test.gz" + key_1 = 'test/test.csv' + key_2 = 'test/test.gz' self.s3.put_file(self.test_bucket, key_1, csv_path) self.s3.put_file(self.test_bucket, key_2, csv_path) - keys = self.s3.list_keys(self.test_bucket, suffix="csv") + keys = self.s3.list_keys(self.test_bucket, suffix='csv') self.assertTrue(key_1 in keys) self.assertFalse(key_2 in keys) - keys = self.s3.list_keys(self.test_bucket, suffix="gz") + keys = self.s3.list_keys(self.test_bucket, suffix='gz') self.assertFalse(key_1 in keys) self.assertTrue(key_2 in keys)
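The hunks above exercise the Parsons S3 wrapper against live AWS (hence the LIVE_TEST guard), and the transfer_bucket hunk that follows moves a key between buckets. A compact usage sketch, assuming AWS credentials in the environment; bucket and key names are placeholders:

    from parsons import S3, Table  # matches the import this diff replaces

    s3 = S3()  # picks up AWS credentials from the environment
    bucket = 'my-temp-bucket'  # placeholder; the tests read S3_TEMP_BUCKET instead

    tbl = Table([{'first': 'Bob', 'last': 'Smith'}])
    s3.put_file(bucket, 'test/test.csv', tbl.to_csv())  # to_csv() returns a local temp-file path

    s3.key_exists(bucket, 'test/test.csv')   # True
    s3.list_keys(bucket, prefix='test/')     # keys filtered by prefix
    s3.list_keys(bucket, suffix='csv')       # keys filtered by suffix

    # Round-trip the file back into a Table
    local_path = s3.get_file(bucket, 'test/test.csv')
    round_tripped = Table.from_csv(local_path)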
@@ -168,17 +168,8 @@ def test_transfer_bucket(self): # Transfer and delete original self.s3.transfer_bucket( - self.test_bucket, - self.test_key_2, - destination_bucket, - None, - None, - None, - None, - None, - False, - True, - ) + self.test_bucket, self.test_key_2, destination_bucket, + None, None, None, None, None, False, True) path_2 = self.s3.get_file(destination_bucket, self.test_key_2) result_tbl_2 = Table.from_csv(path_2) assert_matching_tables(self.tbl_2, result_tbl_2) diff --git a/test/test_salesforce/test_salesforce.py b/test/test_salesforce/test_salesforce.py index 18e6e3b6b7..295f91178c 100644 --- a/test/test_salesforce/test_salesforce.py +++ b/test/test_salesforce/test_salesforce.py @@ -1,98 +1,95 @@ import os import unittest import unittest.mock as mock -from parsons import Salesforce, Table +from parsons.salesforce.salesforce import Salesforce, Table class TestSalesforce(unittest.TestCase): + def setUp(self): - os.environ["SALESFORCE_USERNAME"] = "MYFAKEUSERNAME" - os.environ["SALESFORCE_PASSWORD"] = "MYFAKEPASSWORD" - os.environ["SALESFORCE_SECURITY_TOKEN"] = "MYFAKESECURITYTOKEN" + + os.environ['SALESFORCE_USERNAME'] = 'MYFAKEUSERNAME' + os.environ['SALESFORCE_PASSWORD'] = 'MYFAKEPASSWORD' + os.environ['SALESFORCE_SECURITY_TOKEN'] = 'MYFAKESECURITYTOKEN' self.sf = Salesforce() self.sf._client = mock.MagicMock() - self.sf._client.query_all.return_value = { - "totalSize": 1, - "done": True, - "records": [ - { - "attributes": { - "type": "Contact", - "url": "/services/data/v38.0/" - + "sobjects/Contact/" - + "1234567890AaBbC", - }, - "Id": "1234567890AaBbC", - } - ], - } - self.sf._client.bulk.Contact.insert.return_value = [ - {"success": True, "created": True, "id": "1234567890AaBbC", "errors": []} - ] - self.sf._client.bulk.Contact.update.return_value = [ - {"success": True, "created": False, "id": "1234567890AaBbC", "errors": []} - ] + + self.sf._client.query_all.return_value = [{'Id': 1, 'value': 'FAKE'}] + self.sf._client.bulk.Contact.insert.return_value = [{ + 'success': True, 'created': True, 'id': '1234567890AaBbC', 'errors': [] + }] + self.sf._client.bulk.Contact.update.return_value = [{ + 'success': True, 'created': False, 'id': '1234567890AaBbC', 'errors': [] + }] self.sf._client.bulk.Contact.upsert.return_value = [ - {"success": True, "created": False, "id": "1234567890AaBbC", "errors": []}, - {"success": True, "created": True, "id": "1234567890AaBbc", "errors": []}, - ] - self.sf._client.bulk.Contact.delete.return_value = [ - {"success": True, "created": False, "id": "1234567890AaBbC", "errors": []} + { + 'success': True, 'created': False, 'id': '1234567890AaBbC', 'errors': [] + }, + { + 'success': True, 'created': True, 'id': '1234567890AaBbc', 'errors': [] + } ] + self.sf._client.bulk.Contact.delete.return_value = [{ + 'success': True, 'created': False, 'id': '1234567890AaBbC', 'errors': [] + }] def test_describe(self): + pass def test_describe_fields(self): + # TODO: test this with requests mock instead?
pass def test_query(self): - fake_soql = "FAKESOQL" + + fake_soql = 'FAKESOQL' response = self.sf.query(fake_soql) assert self.sf.client.query_all.called_with(fake_soql) - self.assertEqual(response["records"][0]["Id"], "1234567890AaBbC") + self.assertEqual(response[0]['value'], 'FAKE') def test_insert(self): - fake_data = Table([{"firstname": "Chrisjen", "lastname": "Avasarala"}]) - response = self.sf.insert_record("Contact", fake_data) + + fake_data = Table([{'firstname': 'Chrisjen', 'lastname': 'Avasarala'}]) + response = self.sf.insert_record('Contact', fake_data) assert self.sf.client.bulk.Contact.insert.called_with(fake_data) - assert response[0]["created"] + assert response[0]['created'] def test_update(self): - fake_data = Table( - [ - { - "id": "1234567890AaBbC", - "firstname": "Chrisjen", - "lastname": "Avasarala", - } - ] - ) - response = self.sf.update_record("Contact", fake_data) + + fake_data = Table([{ + 'id': '1234567890AaBbC', + 'firstname': 'Chrisjen', + 'lastname': 'Avasarala' + }]) + response = self.sf.update_record('Contact', fake_data) assert self.sf.client.bulk.Contact.update.called_with(fake_data) - assert not response[0]["created"] + assert not response[0]['created'] def test_upsert(self): - fake_data = Table( - [ - { - "id": "1234567890AaBbC", - "firstname": "Chrisjen", - "lastname": "Avasarala", - }, - {"id": None, "firstname": "Roberta", "lastname": "Draper"}, - ] - ) - response = self.sf.upsert_record("Contact", fake_data, "id") + + fake_data = Table([ + { + 'id': '1234567890AaBbC', + 'firstname': 'Chrisjen', + 'lastname': 'Avasarala' + }, + { + 'id': None, + 'firstname': 'Roberta', + 'lastname': 'Draper' + }]) + response = self.sf.upsert_record('Contact', fake_data, 'id') assert self.sf.client.bulk.Contact.update.called_with(fake_data) print(response) - assert not response[0]["created"] - assert response[1]["created"] + assert not response[0]['created'] + assert response[1]['created'] def test_delete(self): - fake_data = Table([{"id": "1234567890AaBbC"}]) - response = self.sf.delete_record("Contact", fake_data) + + fake_data = Table([{'id': '1234567890AaBbC'}]) + response = self.sf.delete_record('Contact', fake_data) assert self.sf.client.bulk.Contact.update.called_with(fake_data) - assert not response[0]["created"] + assert not response[0]['created'] diff --git a/test/test_scytl/114729_county_expected.csv b/test/test_scytl/114729_county_expected.csv deleted file mode 100644 index 92c6945e50..0000000000 --- a/test/test_scytl/114729_county_expected.csv +++ /dev/null @@ -1,361 +0,0 @@ -state,county_name,office,ballots_cast,reg_voters,precincts_reporting,total_precincts,vote_method,candidate_name,candidate_party,recorded_votes,timestamp_last_updated -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Election Day Votes,Jessica Allison Fore,DEM,233,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Election Day Votes,Jessica Allison Fore,DEM,81,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Election Day Votes,Jessica Allison Fore,DEM,1306,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Election Day Votes,Jessica Allison Fore,DEM,70,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Election Day Votes,Jessica Allison Fore,DEM,86,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Election Day 
Votes,Jessica Allison Fore,DEM,140,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Election Day Votes,Jessica Allison Fore,DEM,561,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Election Day Votes,Jessica Allison Fore,DEM,169,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Election Day Votes,Jessica Allison Fore,DEM,46,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Election Day Votes,Jessica Allison Fore,DEM,111,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Election Day Votes,Jessica Allison Fore,DEM,65,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Election Day Votes,Jessica Allison Fore,DEM,179,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Election Day Votes,Jessica Allison Fore,DEM,168,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Election Day Votes,Jessica Allison Fore,DEM,75,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Election Day Votes,Jessica Allison Fore,DEM,91,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Election Day Votes,Jessica Allison Fore,DEM,11,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Election Day Votes,Jessica Allison Fore,DEM,245,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Election Day Votes,Jessica Allison Fore,DEM,13,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Absentee by Mail Votes,Jessica Allison Fore,DEM,36,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Absentee by Mail Votes,Jessica Allison Fore,DEM,8,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Absentee by Mail Votes,Jessica Allison Fore,DEM,172,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Absentee by Mail Votes,Jessica Allison Fore,DEM,9,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Absentee by Mail Votes,Jessica Allison Fore,DEM,9,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Absentee by Mail Votes,Jessica Allison Fore,DEM,23,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Absentee by Mail Votes,Jessica Allison Fore,DEM,62,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Absentee by Mail Votes,Jessica Allison Fore,DEM,36,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Absentee by Mail Votes,Jessica Allison Fore,DEM,9,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Absentee by Mail Votes,Jessica Allison Fore,DEM,8,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Absentee by Mail Votes,Jessica Allison Fore,DEM,5,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - 
Dem,2621,40630,13,13,Absentee by Mail Votes,Jessica Allison Fore,DEM,21,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Absentee by Mail Votes,Jessica Allison Fore,DEM,26,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Absentee by Mail Votes,Jessica Allison Fore,DEM,16,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Absentee by Mail Votes,Jessica Allison Fore,DEM,16,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Absentee by Mail Votes,Jessica Allison Fore,DEM,2,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Absentee by Mail Votes,Jessica Allison Fore,DEM,28,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Absentee by Mail Votes,Jessica Allison Fore,DEM,1,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Advance Voting Votes,Jessica Allison Fore,DEM,166,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Advance Voting Votes,Jessica Allison Fore,DEM,104,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Advance Voting Votes,Jessica Allison Fore,DEM,1105,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Advance Voting Votes,Jessica Allison Fore,DEM,52,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Advance Voting Votes,Jessica Allison Fore,DEM,77,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Advance Voting Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Advance Voting Votes,Jessica Allison Fore,DEM,648,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Advance Voting Votes,Jessica Allison Fore,DEM,196,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Advance Voting Votes,Jessica Allison Fore,DEM,28,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Advance Voting Votes,Jessica Allison Fore,DEM,68,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Advance Voting Votes,Jessica Allison Fore,DEM,64,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Advance Voting Votes,Jessica Allison Fore,DEM,120,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Advance Voting Votes,Jessica Allison Fore,DEM,169,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Advance Voting Votes,Jessica Allison Fore,DEM,81,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Advance Voting Votes,Jessica Allison Fore,DEM,48,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Advance Voting Votes,Jessica Allison Fore,DEM,7,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Advance Voting Votes,Jessica Allison Fore,DEM,188,2022-06-14 20:19:31+00:00 
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Advance Voting Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Provisional Votes,Jessica Allison Fore,DEM,2,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Provisional Votes,Jessica Allison Fore,DEM,1,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Provisional Votes,Jessica Allison Fore,DEM,0,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Election Day Votes,Tabitha Johnson-Green,DEM,479,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Election Day Votes,Tabitha Johnson-Green,DEM,174,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Election Day Votes,Tabitha Johnson-Green,DEM,2472,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Election Day Votes,Tabitha Johnson-Green,DEM,93,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Election Day Votes,Tabitha Johnson-Green,DEM,301,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives 
- District 10 - Dem,1802,5706,10,10,Election Day Votes,Tabitha Johnson-Green,DEM,589,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Election Day Votes,Tabitha Johnson-Green,DEM,1736,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Election Day Votes,Tabitha Johnson-Green,DEM,248,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Election Day Votes,Tabitha Johnson-Green,DEM,102,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Election Day Votes,Tabitha Johnson-Green,DEM,161,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Election Day Votes,Tabitha Johnson-Green,DEM,149,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Election Day Votes,Tabitha Johnson-Green,DEM,563,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Election Day Votes,Tabitha Johnson-Green,DEM,249,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Election Day Votes,Tabitha Johnson-Green,DEM,141,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Election Day Votes,Tabitha Johnson-Green,DEM,160,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Election Day Votes,Tabitha Johnson-Green,DEM,34,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Election Day Votes,Tabitha Johnson-Green,DEM,564,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Election Day Votes,Tabitha Johnson-Green,DEM,39,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,59,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,18,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,301,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,14,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,49,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,137,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,89,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,33,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,18,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,4,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Absentee by Mail Votes,Tabitha 
Johnson-Green,DEM,23,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,58,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,26,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,27,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,24,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,13,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,58,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,2,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Advance Voting Votes,Tabitha Johnson-Green,DEM,316,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Advance Voting Votes,Tabitha Johnson-Green,DEM,210,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Advance Voting Votes,Tabitha Johnson-Green,DEM,2042,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Advance Voting Votes,Tabitha Johnson-Green,DEM,97,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Advance Voting Votes,Tabitha Johnson-Green,DEM,209,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Advance Voting Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Advance Voting Votes,Tabitha Johnson-Green,DEM,1907,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Advance Voting Votes,Tabitha Johnson-Green,DEM,192,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Advance Voting Votes,Tabitha Johnson-Green,DEM,59,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Advance Voting Votes,Tabitha Johnson-Green,DEM,71,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Advance Voting Votes,Tabitha Johnson-Green,DEM,143,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Advance Voting Votes,Tabitha Johnson-Green,DEM,483,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Advance Voting Votes,Tabitha Johnson-Green,DEM,192,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Advance Voting Votes,Tabitha Johnson-Green,DEM,100,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Advance Voting Votes,Tabitha Johnson-Green,DEM,122,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Advance Voting Votes,Tabitha Johnson-Green,DEM,33,2022-06-14 20:19:31+00:00 -GA,Walton,US House 
of Representatives - District 10 - Dem,3031,70149,21,21,Advance Voting Votes,Tabitha Johnson-Green,DEM,430,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Advance Voting Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Provisional Votes,Tabitha Johnson-Green,DEM,1,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Provisional Votes,Tabitha Johnson-Green,DEM,2,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Provisional Votes,Tabitha Johnson-Green,DEM,3,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Provisional Votes,Tabitha Johnson-Green,DEM,1,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Provisional Votes,Tabitha Johnson-Green,DEM,1,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Provisional Votes,Tabitha Johnson-Green,DEM,1,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Provisional Votes,Tabitha Johnson-Green,DEM,1,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Provisional Votes,Tabitha Johnson-Green,DEM,3,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Provisional Votes,Tabitha Johnson-Green,DEM,0,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Election Day Votes,Phyllis Hatcher,DEM,143,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Election Day Votes,Phyllis Hatcher,DEM,75,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Election Day Votes,Phyllis Hatcher,DEM,727,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Election Day Votes,Phyllis Hatcher,DEM,56,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - 
Dem,1142,14545,5,5,Election Day Votes,Phyllis Hatcher,DEM,76,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Election Day Votes,Phyllis Hatcher,DEM,340,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Election Day Votes,Phyllis Hatcher,DEM,850,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Election Day Votes,Phyllis Hatcher,DEM,96,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Election Day Votes,Phyllis Hatcher,DEM,54,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Election Day Votes,Phyllis Hatcher,DEM,60,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Election Day Votes,Phyllis Hatcher,DEM,97,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Election Day Votes,Phyllis Hatcher,DEM,265,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Election Day Votes,Phyllis Hatcher,DEM,84,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Election Day Votes,Phyllis Hatcher,DEM,36,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Election Day Votes,Phyllis Hatcher,DEM,96,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Election Day Votes,Phyllis Hatcher,DEM,35,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Election Day Votes,Phyllis Hatcher,DEM,201,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Election Day Votes,Phyllis Hatcher,DEM,10,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Absentee by Mail Votes,Phyllis Hatcher,DEM,32,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Absentee by Mail Votes,Phyllis Hatcher,DEM,16,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Absentee by Mail Votes,Phyllis Hatcher,DEM,150,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Absentee by Mail Votes,Phyllis Hatcher,DEM,14,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Absentee by Mail Votes,Phyllis Hatcher,DEM,12,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Absentee by Mail Votes,Phyllis Hatcher,DEM,45,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Absentee by Mail Votes,Phyllis Hatcher,DEM,96,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Absentee by Mail Votes,Phyllis Hatcher,DEM,11,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Absentee by Mail Votes,Phyllis Hatcher,DEM,19,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Absentee by Mail Votes,Phyllis Hatcher,DEM,9,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Absentee by Mail Votes,Phyllis Hatcher,DEM,32,2022-06-14 
20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Absentee by Mail Votes,Phyllis Hatcher,DEM,52,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Absentee by Mail Votes,Phyllis Hatcher,DEM,19,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Absentee by Mail Votes,Phyllis Hatcher,DEM,3,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Absentee by Mail Votes,Phyllis Hatcher,DEM,18,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Absentee by Mail Votes,Phyllis Hatcher,DEM,17,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Absentee by Mail Votes,Phyllis Hatcher,DEM,27,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Absentee by Mail Votes,Phyllis Hatcher,DEM,1,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Advance Voting Votes,Phyllis Hatcher,DEM,135,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Advance Voting Votes,Phyllis Hatcher,DEM,132,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Advance Voting Votes,Phyllis Hatcher,DEM,672,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Advance Voting Votes,Phyllis Hatcher,DEM,57,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Advance Voting Votes,Phyllis Hatcher,DEM,76,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Advance Voting Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Advance Voting Votes,Phyllis Hatcher,DEM,1139,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Advance Voting Votes,Phyllis Hatcher,DEM,117,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Advance Voting Votes,Phyllis Hatcher,DEM,55,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Advance Voting Votes,Phyllis Hatcher,DEM,33,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Advance Voting Votes,Phyllis Hatcher,DEM,129,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Advance Voting Votes,Phyllis Hatcher,DEM,254,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Advance Voting Votes,Phyllis Hatcher,DEM,105,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Advance Voting Votes,Phyllis Hatcher,DEM,42,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Advance Voting Votes,Phyllis Hatcher,DEM,79,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Advance Voting Votes,Phyllis Hatcher,DEM,39,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Advance Voting Votes,Phyllis Hatcher,DEM,176,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - 
District 10 - Dem,110,2421,3,3,Advance Voting Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Provisional Votes,Phyllis Hatcher,DEM,1,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Provisional Votes,Phyllis Hatcher,DEM,1,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Provisional Votes,Phyllis Hatcher,DEM,2,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Provisional Votes,Phyllis Hatcher,DEM,0,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Election Day Votes,Femi Oduwole,DEM,173,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Election Day Votes,Femi Oduwole,DEM,42,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Election Day Votes,Femi Oduwole,DEM,517,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Election Day Votes,Femi Oduwole,DEM,28,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Election Day Votes,Femi Oduwole,DEM,25,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Election Day Votes,Femi Oduwole,DEM,54,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Election 
Day Votes,Femi Oduwole,DEM,687,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Election Day Votes,Femi Oduwole,DEM,101,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Election Day Votes,Femi Oduwole,DEM,19,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Election Day Votes,Femi Oduwole,DEM,56,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Election Day Votes,Femi Oduwole,DEM,30,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Election Day Votes,Femi Oduwole,DEM,152,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Election Day Votes,Femi Oduwole,DEM,58,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Election Day Votes,Femi Oduwole,DEM,34,2022-06-14 20:19:31+00:00 -GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Election Day Votes,Femi Oduwole,DEM,32,2022-06-14 20:19:31+00:00 -GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Election Day Votes,Femi Oduwole,DEM,3,2022-06-14 20:19:31+00:00 -GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Election Day Votes,Femi Oduwole,DEM,368,2022-06-14 20:19:31+00:00 -GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Election Day Votes,Femi Oduwole,DEM,8,2022-06-14 20:19:31+00:00 -GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Absentee by Mail Votes,Femi Oduwole,DEM,12,2022-06-14 20:19:31+00:00 -GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Absentee by Mail Votes,Femi Oduwole,DEM,1,2022-06-14 20:19:31+00:00 -GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Absentee by Mail Votes,Femi Oduwole,DEM,36,2022-06-14 20:19:31+00:00 -GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Absentee by Mail Votes,Femi Oduwole,DEM,1,2022-06-14 20:19:31+00:00 -GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Absentee by Mail Votes,Femi Oduwole,DEM,3,2022-06-14 20:19:31+00:00 -GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Absentee by Mail Votes,Femi Oduwole,DEM,3,2022-06-14 20:19:31+00:00 -GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Absentee by Mail Votes,Femi Oduwole,DEM,24,2022-06-14 20:19:31+00:00 -GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Absentee by Mail Votes,Femi Oduwole,DEM,2,2022-06-14 20:19:31+00:00 -GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Absentee by Mail Votes,Femi Oduwole,DEM,1,2022-06-14 20:19:31+00:00 -GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Absentee by Mail Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00 -GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Absentee by Mail Votes,Femi Oduwole,DEM,7,2022-06-14 20:19:31+00:00 -GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Absentee by Mail Votes,Femi Oduwole,DEM,8,2022-06-14 20:19:31+00:00 -GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Absentee by Mail Votes,Femi Oduwole,DEM,4,2022-06-14 20:19:31+00:00 -GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Absentee by Mail Votes,Femi 
Oduwole,DEM,4,2022-06-14 20:19:31+00:00
-GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Absentee by Mail Votes,Femi Oduwole,DEM,5,2022-06-14 20:19:31+00:00
-GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Absentee by Mail Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Absentee by Mail Votes,Femi Oduwole,DEM,25,2022-06-14 20:19:31+00:00
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Absentee by Mail Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Advance Voting Votes,Femi Oduwole,DEM,98,2022-06-14 20:19:31+00:00
-GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Advance Voting Votes,Femi Oduwole,DEM,47,2022-06-14 20:19:31+00:00
-GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Advance Voting Votes,Femi Oduwole,DEM,358,2022-06-14 20:19:31+00:00
-GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Advance Voting Votes,Femi Oduwole,DEM,25,2022-06-14 20:19:31+00:00
-GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Advance Voting Votes,Femi Oduwole,DEM,28,2022-06-14 20:19:31+00:00
-GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Advance Voting Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Advance Voting Votes,Femi Oduwole,DEM,703,2022-06-14 20:19:31+00:00
-GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Advance Voting Votes,Femi Oduwole,DEM,78,2022-06-14 20:19:31+00:00
-GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Advance Voting Votes,Femi Oduwole,DEM,16,2022-06-14 20:19:31+00:00
-GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Advance Voting Votes,Femi Oduwole,DEM,34,2022-06-14 20:19:31+00:00
-GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Advance Voting Votes,Femi Oduwole,DEM,26,2022-06-14 20:19:31+00:00
-GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Advance Voting Votes,Femi Oduwole,DEM,127,2022-06-14 20:19:31+00:00
-GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Advance Voting Votes,Femi Oduwole,DEM,34,2022-06-14 20:19:31+00:00
-GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Advance Voting Votes,Femi Oduwole,DEM,26,2022-06-14 20:19:31+00:00
-GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Advance Voting Votes,Femi Oduwole,DEM,23,2022-06-14 20:19:31+00:00
-GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Advance Voting Votes,Femi Oduwole,DEM,5,2022-06-14 20:19:31+00:00
-GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Advance Voting Votes,Femi Oduwole,DEM,273,2022-06-14 20:19:31+00:00
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Advance Voting Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Provisional Votes,Femi Oduwole,DEM,1,2022-06-14 20:19:31+00:00
-GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Provisional Votes,Femi Oduwole,DEM,0,2022-06-14 20:19:31+00:00
-GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Election Day Votes,Paul Walton,DEM,66,2022-06-14 20:19:31+00:00
-GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Election Day Votes,Paul Walton,DEM,32,2022-06-14 20:19:31+00:00
-GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Election Day Votes,Paul Walton,DEM,436,2022-06-14 20:19:31+00:00
-GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Election Day Votes,Paul Walton,DEM,49,2022-06-14 20:19:31+00:00
-GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Election Day Votes,Paul Walton,DEM,33,2022-06-14 20:19:31+00:00
-GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Election Day Votes,Paul Walton,DEM,115,2022-06-14 20:19:31+00:00
-GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Election Day Votes,Paul Walton,DEM,185,2022-06-14 20:19:31+00:00
-GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Election Day Votes,Paul Walton,DEM,76,2022-06-14 20:19:31+00:00
-GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Election Day Votes,Paul Walton,DEM,18,2022-06-14 20:19:31+00:00
-GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Election Day Votes,Paul Walton,DEM,67,2022-06-14 20:19:31+00:00
-GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Election Day Votes,Paul Walton,DEM,34,2022-06-14 20:19:31+00:00
-GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Election Day Votes,Paul Walton,DEM,63,2022-06-14 20:19:31+00:00
-GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Election Day Votes,Paul Walton,DEM,62,2022-06-14 20:19:31+00:00
-GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Election Day Votes,Paul Walton,DEM,35,2022-06-14 20:19:31+00:00
-GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Election Day Votes,Paul Walton,DEM,38,2022-06-14 20:19:31+00:00
-GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Election Day Votes,Paul Walton,DEM,10,2022-06-14 20:19:31+00:00
-GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Election Day Votes,Paul Walton,DEM,120,2022-06-14 20:19:31+00:00
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Election Day Votes,Paul Walton,DEM,13,2022-06-14 20:19:31+00:00
-GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Absentee by Mail Votes,Paul Walton,DEM,14,2022-06-14 20:19:31+00:00
-GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Absentee by Mail Votes,Paul Walton,DEM,10,2022-06-14 20:19:31+00:00
-GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Absentee by Mail Votes,Paul Walton,DEM,85,2022-06-14 20:19:31+00:00
-GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Absentee by Mail Votes,Paul Walton,DEM,32,2022-06-14 20:19:31+00:00
-GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Absentee by Mail Votes,Paul Walton,DEM,3,2022-06-14 20:19:31+00:00
-GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Absentee by Mail Votes,Paul Walton,DEM,40,2022-06-14 20:19:31+00:00
-GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Absentee by Mail Votes,Paul Walton,DEM,20,2022-06-14 20:19:31+00:00
-GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Absentee by Mail Votes,Paul Walton,DEM,18,2022-06-14 20:19:31+00:00
-GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Absentee by Mail Votes,Paul Walton,DEM,6,2022-06-14 20:19:31+00:00
-GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Absentee by Mail Votes,Paul Walton,DEM,14,2022-06-14 20:19:31+00:00
-GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Absentee by Mail Votes,Paul Walton,DEM,1,2022-06-14 20:19:31+00:00
-GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Absentee by Mail Votes,Paul Walton,DEM,12,2022-06-14 20:19:31+00:00
-GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Absentee by Mail Votes,Paul Walton,DEM,18,2022-06-14 20:19:31+00:00
-GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Absentee by Mail Votes,Paul Walton,DEM,9,2022-06-14 20:19:31+00:00
-GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Absentee by Mail Votes,Paul Walton,DEM,24,2022-06-14 20:19:31+00:00
-GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Absentee by Mail Votes,Paul Walton,DEM,3,2022-06-14 20:19:31+00:00
-GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Absentee by Mail Votes,Paul Walton,DEM,21,2022-06-14 20:19:31+00:00
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Absentee by Mail Votes,Paul Walton,DEM,1,2022-06-14 20:19:31+00:00
-GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Advance Voting Votes,Paul Walton,DEM,69,2022-06-14 20:19:31+00:00
-GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Advance Voting Votes,Paul Walton,DEM,53,2022-06-14 20:19:31+00:00
-GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Advance Voting Votes,Paul Walton,DEM,373,2022-06-14 20:19:31+00:00
-GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Advance Voting Votes,Paul Walton,DEM,94,2022-06-14 20:19:31+00:00
-GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Advance Voting Votes,Paul Walton,DEM,38,2022-06-14 20:19:31+00:00
-GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Advance Voting Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Advance Voting Votes,Paul Walton,DEM,239,2022-06-14 20:19:31+00:00
-GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Advance Voting Votes,Paul Walton,DEM,56,2022-06-14 20:19:31+00:00
-GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Advance Voting Votes,Paul Walton,DEM,13,2022-06-14 20:19:31+00:00
-GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Advance Voting Votes,Paul Walton,DEM,56,2022-06-14 20:19:31+00:00
-GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Advance Voting Votes,Paul Walton,DEM,24,2022-06-14 20:19:31+00:00
-GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Advance Voting Votes,Paul Walton,DEM,54,2022-06-14 20:19:31+00:00
-GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Advance Voting Votes,Paul Walton,DEM,52,2022-06-14 20:19:31+00:00
-GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Advance Voting Votes,Paul Walton,DEM,37,2022-06-14 20:19:31+00:00
-GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Advance Voting Votes,Paul Walton,DEM,56,2022-06-14 20:19:31+00:00
-GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Advance Voting Votes,Paul Walton,DEM,1,2022-06-14 20:19:31+00:00
-GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Advance Voting Votes,Paul Walton,DEM,76,2022-06-14 20:19:31+00:00
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Advance Voting Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Barrow,US House of Representatives - District 10 - Dem,2182,55127,8,8,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Butts,US House of Representatives - District 10 - Dem,1157,18378,5,5,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Clarke,US House of Representatives - District 10 - Dem,12476,71350,24,24,Provisional Votes,Paul Walton,DEM,1,2022-06-14 20:19:31+00:00
-GA,Elbert,US House of Representatives - District 10 - Dem,773,12311,11,11,Provisional Votes,Paul Walton,DEM,2,2022-06-14 20:19:31+00:00
-GA,Greene,US House of Representatives - District 10 - Dem,1142,14545,5,5,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Hancock,US House of Representatives - District 10 - Dem,1802,5706,10,10,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Henry,US House of Representatives - District 10 - Dem,9546,82095,18,18,Provisional Votes,Paul Walton,DEM,2,2022-06-14 20:19:31+00:00
-GA,Jackson,US House of Representatives - District 10 - Dem,1578,53627,4,4,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Jasper,US House of Representatives - District 10 - Dem,530,10585,3,3,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Madison,US House of Representatives - District 10 - Dem,839,20681,12,12,Provisional Votes,Paul Walton,DEM,1,2022-06-14 20:19:31+00:00
-GA,Morgan,US House of Representatives - District 10 - Dem,931,14937,5,5,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Newton,US House of Representatives - District 10 - Dem,2621,40630,13,13,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Oconee,US House of Representatives - District 10 - Dem,1449,29155,8,8,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Oglethorpe,US House of Representatives - District 10 - Dem,728,10762,3,3,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Putnam,US House of Representatives - District 10 - Dem,945,16007,5,5,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Taliaferro,US House of Representatives - District 10 - Dem,298,0,2,2,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Walton,US House of Representatives - District 10 - Dem,3031,70149,21,21,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
-GA,Wilkes,US House of Representatives - District 10 - Dem,110,2421,3,3,Provisional Votes,Paul Walton,DEM,0,2022-06-14 20:19:31+00:00
diff --git a/test/test_scytl/114729_detailxml.zip b/test/test_scytl/114729_detailxml.zip
deleted file mode 100644
index 993b3b21ac..0000000000
Binary files a/test/test_scytl/114729_detailxml.zip and /dev/null differ
diff --git a/test/test_scytl/114729_precinct_expected.csv b/test/test_scytl/114729_precinct_expected.csv
deleted file mode 100644
index 7944dba2ee..0000000000
--- a/test/test_scytl/114729_precinct_expected.csv
+++ /dev/null
@@ -1,641 +0,0 @@
-state,county_name,county_id,office,ballots_cast,reg_voters,vote_method,candidate_name,candidate_party,precinct_name,recorded_votes,voter_turnout,percent_reporting,timestamp_last_updated
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Absentee by Mail Votes,Jessica Allison Fore,DEM,01,3,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Absentee by Mail Votes,Jessica Allison Fore,DEM,02,2,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Absentee by Mail Votes,Jessica Allison Fore,DEM,03,4,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Absentee by Mail Votes,Jessica Allison Fore,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Absentee by Mail Votes,Jessica Allison Fore,DEM,05,3,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Absentee by Mail Votes,Jessica Allison Fore,DEM,08,4,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Absentee by Mail Votes,Jessica Allison Fore,DEM,13,12,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Absentee by Mail Votes,Jessica Allison Fore,DEM,16,8,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Advance Voting Votes,Jessica Allison Fore,DEM,01,9,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Advance Voting Votes,Jessica Allison Fore,DEM,02,13,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Advance Voting Votes,Jessica Allison Fore,DEM,03,18,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Advance Voting Votes,Jessica Allison Fore,DEM,04,9,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Advance Voting Votes,Jessica Allison Fore,DEM,05,22,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Advance Voting Votes,Jessica Allison Fore,DEM,08,22,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Advance Voting Votes,Jessica Allison Fore,DEM,13,30,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Advance Voting Votes,Jessica Allison Fore,DEM,16,43,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Election Day Votes,Jessica Allison Fore,DEM,01,11,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Election Day Votes,Jessica Allison Fore,DEM,02,25,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Election Day Votes,Jessica Allison Fore,DEM,03,17,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Election Day Votes,Jessica Allison Fore,DEM,04,14,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Election Day Votes,Jessica Allison Fore,DEM,05,42,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Election Day Votes,Jessica Allison Fore,DEM,08,19,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Election Day Votes,Jessica Allison Fore,DEM,13,43,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Election Day Votes,Jessica Allison Fore,DEM,16,62,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Provisional Votes,Jessica Allison Fore,DEM,01,0,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Provisional Votes,Jessica Allison Fore,DEM,02,0,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Provisional Votes,Jessica Allison Fore,DEM,03,0,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Provisional Votes,Jessica Allison Fore,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Provisional Votes,Jessica Allison Fore,DEM,05,0,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Provisional Votes,Jessica Allison Fore,DEM,08,0,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Provisional Votes,Jessica Allison Fore,DEM,13,0,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Provisional Votes,Jessica Allison Fore,DEM,16,0,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,01,3,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,02,7,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,03,6,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,04,7,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,05,9,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,08,7,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,13,8,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,16,12,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Advance Voting Votes,Tabitha Johnson-Green,DEM,01,16,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Advance Voting Votes,Tabitha Johnson-Green,DEM,02,27,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Advance Voting Votes,Tabitha Johnson-Green,DEM,03,21,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Advance Voting Votes,Tabitha Johnson-Green,DEM,04,14,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Advance Voting Votes,Tabitha Johnson-Green,DEM,05,46,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Advance Voting Votes,Tabitha Johnson-Green,DEM,08,51,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Advance Voting Votes,Tabitha Johnson-Green,DEM,13,78,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Advance Voting Votes,Tabitha Johnson-Green,DEM,16,63,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Election Day Votes,Tabitha Johnson-Green,DEM,01,26,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Election Day Votes,Tabitha Johnson-Green,DEM,02,35,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Election Day Votes,Tabitha Johnson-Green,DEM,03,46,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Election Day Votes,Tabitha Johnson-Green,DEM,04,22,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Election Day Votes,Tabitha Johnson-Green,DEM,05,107,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Election Day Votes,Tabitha Johnson-Green,DEM,08,44,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Election Day Votes,Tabitha Johnson-Green,DEM,13,93,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Election Day Votes,Tabitha Johnson-Green,DEM,16,106,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Provisional Votes,Tabitha Johnson-Green,DEM,01,0,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Provisional Votes,Tabitha Johnson-Green,DEM,02,0,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Provisional Votes,Tabitha Johnson-Green,DEM,03,0,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Provisional Votes,Tabitha Johnson-Green,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Provisional Votes,Tabitha Johnson-Green,DEM,05,0,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Provisional Votes,Tabitha Johnson-Green,DEM,08,0,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Provisional Votes,Tabitha Johnson-Green,DEM,13,0,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Provisional Votes,Tabitha Johnson-Green,DEM,16,0,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Absentee by Mail Votes,Phyllis Hatcher,DEM,01,0,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Absentee by Mail Votes,Phyllis Hatcher,DEM,02,2,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Absentee by Mail Votes,Phyllis Hatcher,DEM,03,9,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Absentee by Mail Votes,Phyllis Hatcher,DEM,04,5,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Absentee by Mail Votes,Phyllis Hatcher,DEM,05,7,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Absentee by Mail Votes,Phyllis Hatcher,DEM,08,5,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Absentee by Mail Votes,Phyllis Hatcher,DEM,13,3,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Absentee by Mail Votes,Phyllis Hatcher,DEM,16,1,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Advance Voting Votes,Phyllis Hatcher,DEM,01,6,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Advance Voting Votes,Phyllis Hatcher,DEM,02,8,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Advance Voting Votes,Phyllis Hatcher,DEM,03,11,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Advance Voting Votes,Phyllis Hatcher,DEM,04,4,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Advance Voting Votes,Phyllis Hatcher,DEM,05,34,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Advance Voting Votes,Phyllis Hatcher,DEM,08,18,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Advance Voting Votes,Phyllis Hatcher,DEM,13,29,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Advance Voting Votes,Phyllis Hatcher,DEM,16,25,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Election Day Votes,Phyllis Hatcher,DEM,01,10,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Election Day Votes,Phyllis Hatcher,DEM,02,10,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Election Day Votes,Phyllis Hatcher,DEM,03,17,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Election Day Votes,Phyllis Hatcher,DEM,04,4,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Election Day Votes,Phyllis Hatcher,DEM,05,29,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Election Day Votes,Phyllis Hatcher,DEM,08,18,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Election Day Votes,Phyllis Hatcher,DEM,13,23,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Election Day Votes,Phyllis Hatcher,DEM,16,32,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Provisional Votes,Phyllis Hatcher,DEM,01,0,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Provisional Votes,Phyllis Hatcher,DEM,02,0,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Provisional Votes,Phyllis Hatcher,DEM,03,0,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Provisional Votes,Phyllis Hatcher,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Provisional Votes,Phyllis Hatcher,DEM,05,0,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Provisional Votes,Phyllis Hatcher,DEM,08,1,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Provisional Votes,Phyllis Hatcher,DEM,13,0,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Provisional Votes,Phyllis Hatcher,DEM,16,0,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Absentee by Mail Votes,Femi Oduwole,DEM,01,2,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Absentee by Mail Votes,Femi Oduwole,DEM,02,0,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Absentee by Mail Votes,Femi Oduwole,DEM,03,0,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Absentee by Mail Votes,Femi Oduwole,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Absentee by Mail Votes,Femi Oduwole,DEM,05,0,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Absentee by Mail Votes,Femi Oduwole,DEM,08,1,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Absentee by Mail Votes,Femi Oduwole,DEM,13,0,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Absentee by Mail Votes,Femi Oduwole,DEM,16,9,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Advance Voting Votes,Femi Oduwole,DEM,01,4,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Advance Voting Votes,Femi Oduwole,DEM,02,7,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Advance Voting Votes,Femi Oduwole,DEM,03,9,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Advance Voting Votes,Femi Oduwole,DEM,04,5,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Advance Voting Votes,Femi Oduwole,DEM,05,18,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Advance Voting Votes,Femi Oduwole,DEM,08,9,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Advance Voting Votes,Femi Oduwole,DEM,13,23,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Advance Voting Votes,Femi Oduwole,DEM,16,23,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Election Day Votes,Femi Oduwole,DEM,01,15,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Election Day Votes,Femi Oduwole,DEM,02,14,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Election Day Votes,Femi Oduwole,DEM,03,10,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Election Day Votes,Femi Oduwole,DEM,04,7,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Election Day Votes,Femi Oduwole,DEM,05,35,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Election Day Votes,Femi Oduwole,DEM,08,14,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Election Day Votes,Femi Oduwole,DEM,13,40,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Election Day Votes,Femi Oduwole,DEM,16,38,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Provisional Votes,Femi Oduwole,DEM,01,0,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Provisional Votes,Femi Oduwole,DEM,02,0,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Provisional Votes,Femi Oduwole,DEM,03,0,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Provisional Votes,Femi Oduwole,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Provisional Votes,Femi Oduwole,DEM,05,0,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Provisional Votes,Femi Oduwole,DEM,08,0,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Provisional Votes,Femi Oduwole,DEM,13,0,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Provisional Votes,Femi Oduwole,DEM,16,0,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Absentee by Mail Votes,Paul Walton,DEM,01,0,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Absentee by Mail Votes,Paul Walton,DEM,02,2,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Absentee by Mail Votes,Paul Walton,DEM,03,0,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Absentee by Mail Votes,Paul Walton,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Absentee by Mail Votes,Paul Walton,DEM,05,2,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Absentee by Mail Votes,Paul Walton,DEM,08,2,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Absentee by Mail Votes,Paul Walton,DEM,13,5,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Absentee by Mail Votes,Paul Walton,DEM,16,3,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Advance Voting Votes,Paul Walton,DEM,01,2,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Advance Voting Votes,Paul Walton,DEM,02,5,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Advance Voting Votes,Paul Walton,DEM,03,7,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Advance Voting Votes,Paul Walton,DEM,04,4,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Advance Voting Votes,Paul Walton,DEM,05,7,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Advance Voting Votes,Paul Walton,DEM,08,12,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Advance Voting Votes,Paul Walton,DEM,13,21,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Advance Voting Votes,Paul Walton,DEM,16,11,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Election Day Votes,Paul Walton,DEM,01,7,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Election Day Votes,Paul Walton,DEM,02,6,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Election Day Votes,Paul Walton,DEM,03,10,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Election Day Votes,Paul Walton,DEM,04,1,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Election Day Votes,Paul Walton,DEM,05,7,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Election Day Votes,Paul Walton,DEM,08,12,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Election Day Votes,Paul Walton,DEM,13,9,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Election Day Votes,Paul Walton,DEM,16,14,4.78,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,122,3338,Provisional Votes,Paul Walton,DEM,01,0,3.65,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,180,6752,Provisional Votes,Paul Walton,DEM,02,0,2.67,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,192,6178,Provisional Votes,Paul Walton,DEM,03,0,3.11,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,106,3735,Provisional Votes,Paul Walton,DEM,04,0,2.84,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,403,8518,Provisional Votes,Paul Walton,DEM,05,0,4.73,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,255,7095,Provisional Votes,Paul Walton,DEM,08,0,3.59,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,439,9374,Provisional Votes,Paul Walton,DEM,13,0,4.68,4,2022-06-10 13:34:02+00:00
-GA,Barrow,114737,US House of Representatives - District 10 - Dem,485,10137,Provisional Votes,Paul Walton,DEM,16,0,4.78,4,2022-06-10 13:34:02+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Absentee by Mail Votes,Jessica Allison Fore,DEM,1A,1,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Absentee by Mail Votes,Jessica Allison Fore,DEM,1B,5,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Absentee by Mail Votes,Jessica Allison Fore,DEM,1C,12,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Absentee by Mail Votes,Jessica Allison Fore,DEM,1D,16,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Absentee by Mail Votes,Jessica Allison Fore,DEM,2A,8,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Absentee by Mail Votes,Jessica Allison Fore,DEM,2B,4,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Absentee by Mail Votes,Jessica Allison Fore,DEM,3A,3,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Absentee by Mail Votes,Jessica Allison Fore,DEM,3B,6,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Absentee by Mail Votes,Jessica Allison Fore,DEM,4A,0,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Absentee by Mail Votes,Jessica Allison Fore,DEM,4B,3,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Absentee by Mail Votes,Jessica Allison Fore,DEM,5A,10,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Absentee by Mail Votes,Jessica Allison Fore,DEM,5B,3,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Absentee by Mail Votes,Jessica Allison Fore,DEM,5C,8,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Absentee by Mail Votes,Jessica Allison Fore,DEM,5D,3,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Absentee by Mail Votes,Jessica Allison Fore,DEM,6A,6,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Absentee by Mail Votes,Jessica Allison Fore,DEM,6B,14,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Absentee by Mail Votes,Jessica Allison Fore,DEM,6C,6,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Absentee by Mail Votes,Jessica Allison Fore,DEM,6D,1,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Absentee by Mail Votes,Jessica Allison Fore,DEM,7A,9,17.99,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Absentee by Mail Votes,Jessica Allison Fore,DEM,7B,18,17.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Absentee by Mail Votes,Jessica Allison Fore,DEM,7C,5,16.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Absentee by Mail Votes,Jessica Allison Fore,DEM,8A,16,18.46,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Absentee by Mail Votes,Jessica Allison Fore,DEM,8B,7,17.58,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Absentee by Mail Votes,Jessica Allison Fore,DEM,8C,8,15.92,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Advance Voting Votes,Jessica Allison Fore,DEM,1A,43,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Advance Voting Votes,Jessica Allison Fore,DEM,1B,57,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Advance Voting Votes,Jessica Allison Fore,DEM,1C,49,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Advance Voting Votes,Jessica Allison Fore,DEM,1D,63,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Advance Voting Votes,Jessica Allison Fore,DEM,2A,29,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Advance Voting Votes,Jessica Allison Fore,DEM,2B,45,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Advance Voting Votes,Jessica Allison Fore,DEM,3A,34,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Advance Voting Votes,Jessica Allison Fore,DEM,3B,43,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Advance Voting Votes,Jessica Allison Fore,DEM,4A,21,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Advance Voting Votes,Jessica Allison Fore,DEM,4B,49,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Advance Voting Votes,Jessica Allison Fore,DEM,5A,74,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Advance Voting Votes,Jessica Allison Fore,DEM,5B,43,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Advance Voting Votes,Jessica Allison Fore,DEM,5C,85,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Advance Voting Votes,Jessica Allison Fore,DEM,5D,29,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Advance Voting Votes,Jessica Allison Fore,DEM,6A,38,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Advance Voting Votes,Jessica Allison Fore,DEM,6B,63,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Advance Voting Votes,Jessica Allison Fore,DEM,6C,45,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Advance Voting Votes,Jessica Allison Fore,DEM,6D,28,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Advance Voting Votes,Jessica Allison Fore,DEM,7A,33,17.99,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Advance Voting Votes,Jessica Allison Fore,DEM,7B,57,17.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Advance Voting Votes,Jessica Allison Fore,DEM,7C,47,16.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Advance Voting Votes,Jessica Allison Fore,DEM,8A,60,18.46,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Advance Voting Votes,Jessica Allison Fore,DEM,8B,36,17.58,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Advance Voting Votes,Jessica Allison Fore,DEM,8C,34,15.92,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Election Day Votes,Jessica Allison Fore,DEM,1A,49,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Election Day Votes,Jessica Allison Fore,DEM,1B,44,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Election Day Votes,Jessica Allison Fore,DEM,1C,62,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Election Day Votes,Jessica Allison Fore,DEM,1D,91,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Election Day Votes,Jessica Allison Fore,DEM,2A,50,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Election Day Votes,Jessica Allison Fore,DEM,2B,60,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Election Day Votes,Jessica Allison Fore,DEM,3A,25,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Election Day Votes,Jessica Allison Fore,DEM,3B,47,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Election Day Votes,Jessica Allison Fore,DEM,4A,25,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Election Day Votes,Jessica Allison Fore,DEM,4B,51,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Election Day Votes,Jessica Allison Fore,DEM,5A,107,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Election Day Votes,Jessica Allison Fore,DEM,5B,61,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Election Day Votes,Jessica Allison Fore,DEM,5C,101,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Election Day Votes,Jessica Allison Fore,DEM,5D,48,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Election Day Votes,Jessica Allison Fore,DEM,6A,45,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Election Day Votes,Jessica Allison Fore,DEM,6B,62,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Election Day Votes,Jessica Allison Fore,DEM,6C,50,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Election Day Votes,Jessica Allison Fore,DEM,6D,47,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Election Day Votes,Jessica Allison Fore,DEM,7A,39,17.99,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Election Day Votes,Jessica Allison Fore,DEM,7B,33,17.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Election Day Votes,Jessica Allison Fore,DEM,7C,41,16.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Election Day Votes,Jessica Allison Fore,DEM,8A,74,18.46,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Election Day Votes,Jessica Allison Fore,DEM,8B,44,17.58,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Election Day Votes,Jessica Allison Fore,DEM,8C,50,15.92,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Provisional Votes,Jessica Allison Fore,DEM,1A,0,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Provisional Votes,Jessica Allison Fore,DEM,1B,0,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Provisional Votes,Jessica Allison Fore,DEM,1C,0,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Provisional Votes,Jessica Allison Fore,DEM,1D,0,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Provisional Votes,Jessica Allison Fore,DEM,2A,0,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Provisional Votes,Jessica Allison Fore,DEM,2B,0,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Provisional Votes,Jessica Allison Fore,DEM,3A,0,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Provisional Votes,Jessica Allison Fore,DEM,3B,0,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Provisional Votes,Jessica Allison Fore,DEM,4A,0,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Provisional Votes,Jessica Allison Fore,DEM,4B,0,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Provisional Votes,Jessica Allison Fore,DEM,5A,0,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Provisional Votes,Jessica Allison Fore,DEM,5B,1,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Provisional Votes,Jessica Allison Fore,DEM,5C,0,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Provisional Votes,Jessica Allison Fore,DEM,5D,0,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Provisional Votes,Jessica Allison Fore,DEM,6A,0,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Provisional Votes,Jessica Allison Fore,DEM,6B,0,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Provisional Votes,Jessica Allison Fore,DEM,6C,0,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Provisional Votes,Jessica Allison Fore,DEM,6D,0,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Provisional Votes,Jessica Allison Fore,DEM,7A,0,17.99,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Provisional Votes,Jessica Allison Fore,DEM,7B,1,17.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Provisional Votes,Jessica Allison Fore,DEM,7C,0,16.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Provisional Votes,Jessica Allison Fore,DEM,8A,0,18.46,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Provisional Votes,Jessica Allison Fore,DEM,8B,0,17.58,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Provisional Votes,Jessica Allison Fore,DEM,8C,0,15.92,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,1A,1,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,1B,14,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,1C,12,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,1D,11,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,2A,20,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,2B,20,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,3A,24,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,3B,11,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,4A,5,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,4B,6,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,5A,13,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,5B,17,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,5C,4,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,5D,9,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,6A,13,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,6B,28,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,6C,19,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,6D,8,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,7A,17,17.99,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,7B,14,17.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,7C,7,16.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,8A,14,18.46,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,8B,5,17.58,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Absentee by Mail Votes,Tabitha Johnson-Green,DEM,8C,9,15.92,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Advance Voting Votes,Tabitha Johnson-Green,DEM,1A,64,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Advance Voting Votes,Tabitha Johnson-Green,DEM,1B,144,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Advance Voting Votes,Tabitha Johnson-Green,DEM,1C,87,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Advance Voting Votes,Tabitha Johnson-Green,DEM,1D,92,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Advance Voting Votes,Tabitha Johnson-Green,DEM,2A,91,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Advance Voting Votes,Tabitha Johnson-Green,DEM,2B,140,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Advance Voting Votes,Tabitha Johnson-Green,DEM,3A,69,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Advance Voting Votes,Tabitha Johnson-Green,DEM,3B,79,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Advance Voting Votes,Tabitha Johnson-Green,DEM,4A,38,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Advance Voting Votes,Tabitha Johnson-Green,DEM,4B,69,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Advance Voting Votes,Tabitha Johnson-Green,DEM,5A,117,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Advance Voting Votes,Tabitha Johnson-Green,DEM,5B,83,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Advance Voting Votes,Tabitha Johnson-Green,DEM,5C,81,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Advance Voting Votes,Tabitha Johnson-Green,DEM,5D,69,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Advance Voting Votes,Tabitha Johnson-Green,DEM,6A,100,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Advance Voting Votes,Tabitha Johnson-Green,DEM,6B,100,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Advance Voting Votes,Tabitha Johnson-Green,DEM,6C,80,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Advance Voting Votes,Tabitha Johnson-Green,DEM,6D,58,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Advance Voting Votes,Tabitha Johnson-Green,DEM,7A,80,17.99,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Advance Voting Votes,Tabitha Johnson-Green,DEM,7B,105,17.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Advance Voting Votes,Tabitha Johnson-Green,DEM,7C,56,16.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Advance Voting Votes,Tabitha Johnson-Green,DEM,8A,91,18.46,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Advance Voting Votes,Tabitha Johnson-Green,DEM,8B,57,17.58,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Advance Voting Votes,Tabitha Johnson-Green,DEM,8C,92,15.92,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Election Day Votes,Tabitha Johnson-Green,DEM,1A,98,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Election Day Votes,Tabitha Johnson-Green,DEM,1B,108,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Election Day Votes,Tabitha Johnson-Green,DEM,1C,74,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Election Day Votes,Tabitha Johnson-Green,DEM,1D,131,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Election Day Votes,Tabitha Johnson-Green,DEM,2A,136,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Election Day Votes,Tabitha Johnson-Green,DEM,2B,181,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Election Day Votes,Tabitha Johnson-Green,DEM,3A,106,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Election Day Votes,Tabitha Johnson-Green,DEM,3B,74,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Election Day Votes,Tabitha Johnson-Green,DEM,4A,34,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Election Day Votes,Tabitha Johnson-Green,DEM,4B,78,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Election Day Votes,Tabitha Johnson-Green,DEM,5A,141,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Election Day Votes,Tabitha Johnson-Green,DEM,5B,142,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Election Day Votes,Tabitha Johnson-Green,DEM,5C,130,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Election Day Votes,Tabitha Johnson-Green,DEM,5D,136,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Election Day Votes,Tabitha Johnson-Green,DEM,6A,93,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Election Day Votes,Tabitha Johnson-Green,DEM,6B,120,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Election Day Votes,Tabitha Johnson-Green,DEM,6C,86,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Election Day Votes,Tabitha Johnson-Green,DEM,6D,101,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Election Day Votes,Tabitha Johnson-Green,DEM,7A,95,17.99,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Election Day Votes,Tabitha Johnson-Green,DEM,7B,64,17.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Election Day Votes,Tabitha Johnson-Green,DEM,7C,59,16.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Election Day Votes,Tabitha Johnson-Green,DEM,8A,101,18.46,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Election Day Votes,Tabitha Johnson-Green,DEM,8B,69,17.58,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Election Day Votes,Tabitha Johnson-Green,DEM,8C,115,15.92,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Provisional Votes,Tabitha Johnson-Green,DEM,1A,0,15.49,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Provisional Votes,Tabitha Johnson-Green,DEM,1B,0,17.44,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Provisional Votes,Tabitha Johnson-Green,DEM,1C,0,21.61,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Provisional Votes,Tabitha Johnson-Green,DEM,1D,0,25.31,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Provisional Votes,Tabitha Johnson-Green,DEM,2A,0,14.37,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Provisional Votes,Tabitha Johnson-Green,DEM,2B,1,15.59,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Provisional Votes,Tabitha Johnson-Green,DEM,3A,0,16.47,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Provisional Votes,Tabitha Johnson-Green,DEM,3B,0,13.23,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Provisional Votes,Tabitha Johnson-Green,DEM,4A,0,9.82,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Provisional Votes,Tabitha Johnson-Green,DEM,4B,0,13.79,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Provisional Votes,Tabitha Johnson-Green,DEM,5A,0,25.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Provisional Votes,Tabitha Johnson-Green,DEM,5B,0,19.40,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Provisional Votes,Tabitha Johnson-Green,DEM,5C,0,34.36,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Provisional Votes,Tabitha Johnson-Green,DEM,5D,0,16.42,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Provisional Votes,Tabitha Johnson-Green,DEM,6A,0,12.45,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Provisional Votes,Tabitha Johnson-Green,DEM,6B,0,17.52,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Provisional Votes,Tabitha Johnson-Green,DEM,6C,0,16.15,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Provisional Votes,Tabitha Johnson-Green,DEM,6D,0,25.32,4,2022-06-13 17:52:28+00:00
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Provisional Votes,Tabitha Johnson-Green,DEM,7A,0,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Provisional Votes,Tabitha Johnson-Green,DEM,7B,1,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Provisional Votes,Tabitha Johnson-Green,DEM,7C,0,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Provisional Votes,Tabitha Johnson-Green,DEM,8A,0,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Provisional Votes,Tabitha Johnson-Green,DEM,8B,0,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Provisional Votes,Tabitha Johnson-Green,DEM,8C,0,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Absentee by Mail Votes,Phyllis Hatcher,DEM,1A,3,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Absentee by Mail Votes,Phyllis Hatcher,DEM,1B,4,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Absentee by Mail Votes,Phyllis Hatcher,DEM,1C,6,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Absentee by Mail Votes,Phyllis Hatcher,DEM,1D,7,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Absentee by Mail Votes,Phyllis Hatcher,DEM,2A,6,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Absentee by Mail Votes,Phyllis Hatcher,DEM,2B,5,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Absentee by Mail Votes,Phyllis Hatcher,DEM,3A,1,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Absentee by Mail Votes,Phyllis Hatcher,DEM,3B,5,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Absentee by Mail Votes,Phyllis Hatcher,DEM,4A,0,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Absentee by Mail Votes,Phyllis Hatcher,DEM,4B,11,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Absentee by Mail Votes,Phyllis Hatcher,DEM,5A,3,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Absentee by Mail Votes,Phyllis Hatcher,DEM,5B,14,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Absentee by Mail Votes,Phyllis Hatcher,DEM,5C,0,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Absentee by Mail Votes,Phyllis Hatcher,DEM,5D,7,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Absentee by Mail Votes,Phyllis Hatcher,DEM,6A,5,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Absentee by Mail Votes,Phyllis Hatcher,DEM,6B,22,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Absentee by Mail 
Votes,Phyllis Hatcher,DEM,6C,4,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Absentee by Mail Votes,Phyllis Hatcher,DEM,6D,4,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Absentee by Mail Votes,Phyllis Hatcher,DEM,7A,6,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Absentee by Mail Votes,Phyllis Hatcher,DEM,7B,4,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Absentee by Mail Votes,Phyllis Hatcher,DEM,7C,3,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Absentee by Mail Votes,Phyllis Hatcher,DEM,8A,8,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Absentee by Mail Votes,Phyllis Hatcher,DEM,8B,11,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Absentee by Mail Votes,Phyllis Hatcher,DEM,8C,11,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Advance Voting Votes,Phyllis Hatcher,DEM,1A,26,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Advance Voting Votes,Phyllis Hatcher,DEM,1B,48,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Advance Voting Votes,Phyllis Hatcher,DEM,1C,30,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Advance Voting Votes,Phyllis Hatcher,DEM,1D,31,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Advance Voting Votes,Phyllis Hatcher,DEM,2A,36,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Advance Voting Votes,Phyllis Hatcher,DEM,2B,37,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Advance Voting Votes,Phyllis Hatcher,DEM,3A,17,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Advance Voting Votes,Phyllis Hatcher,DEM,3B,30,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Advance Voting Votes,Phyllis Hatcher,DEM,4A,10,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Advance Voting Votes,Phyllis Hatcher,DEM,4B,38,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Advance Voting Votes,Phyllis Hatcher,DEM,5A,24,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Advance Voting Votes,Phyllis Hatcher,DEM,5B,27,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Advance Voting Votes,Phyllis Hatcher,DEM,5C,23,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Advance Voting Votes,Phyllis Hatcher,DEM,5D,15,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Advance Voting Votes,Phyllis Hatcher,DEM,6A,31,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - 
District 10 - Dem,660,3768,Advance Voting Votes,Phyllis Hatcher,DEM,6B,47,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Advance Voting Votes,Phyllis Hatcher,DEM,6C,30,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Advance Voting Votes,Phyllis Hatcher,DEM,6D,14,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Advance Voting Votes,Phyllis Hatcher,DEM,7A,29,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Advance Voting Votes,Phyllis Hatcher,DEM,7B,30,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Advance Voting Votes,Phyllis Hatcher,DEM,7C,18,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Advance Voting Votes,Phyllis Hatcher,DEM,8A,28,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Advance Voting Votes,Phyllis Hatcher,DEM,8B,17,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Advance Voting Votes,Phyllis Hatcher,DEM,8C,36,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Election Day Votes,Phyllis Hatcher,DEM,1A,29,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Election Day Votes,Phyllis Hatcher,DEM,1B,29,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Election Day Votes,Phyllis Hatcher,DEM,1C,32,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Election Day Votes,Phyllis Hatcher,DEM,1D,39,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Election Day Votes,Phyllis Hatcher,DEM,2A,47,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Election Day Votes,Phyllis Hatcher,DEM,2B,28,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Election Day Votes,Phyllis Hatcher,DEM,3A,29,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Election Day Votes,Phyllis Hatcher,DEM,3B,25,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Election Day Votes,Phyllis Hatcher,DEM,4A,19,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Election Day Votes,Phyllis Hatcher,DEM,4B,29,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Election Day Votes,Phyllis Hatcher,DEM,5A,29,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Election Day Votes,Phyllis Hatcher,DEM,5B,35,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Election Day Votes,Phyllis Hatcher,DEM,5C,32,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Election Day Votes,Phyllis Hatcher,DEM,5D,39,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of 
Representatives - District 10 - Dem,480,3854,Election Day Votes,Phyllis Hatcher,DEM,6A,28,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Election Day Votes,Phyllis Hatcher,DEM,6B,40,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Election Day Votes,Phyllis Hatcher,DEM,6C,31,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Election Day Votes,Phyllis Hatcher,DEM,6D,29,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Election Day Votes,Phyllis Hatcher,DEM,7A,30,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Election Day Votes,Phyllis Hatcher,DEM,7B,18,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Election Day Votes,Phyllis Hatcher,DEM,7C,18,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Election Day Votes,Phyllis Hatcher,DEM,8A,32,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Election Day Votes,Phyllis Hatcher,DEM,8B,31,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Election Day Votes,Phyllis Hatcher,DEM,8C,29,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Provisional Votes,Phyllis Hatcher,DEM,1A,0,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Provisional Votes,Phyllis Hatcher,DEM,1B,0,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Provisional Votes,Phyllis Hatcher,DEM,1C,0,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Provisional Votes,Phyllis Hatcher,DEM,1D,0,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Provisional Votes,Phyllis Hatcher,DEM,2A,0,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Provisional Votes,Phyllis Hatcher,DEM,2B,0,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Provisional Votes,Phyllis Hatcher,DEM,3A,0,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Provisional Votes,Phyllis Hatcher,DEM,3B,0,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Provisional Votes,Phyllis Hatcher,DEM,4A,0,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Provisional Votes,Phyllis Hatcher,DEM,4B,0,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Provisional Votes,Phyllis Hatcher,DEM,5A,0,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Provisional Votes,Phyllis Hatcher,DEM,5B,0,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Provisional Votes,Phyllis Hatcher,DEM,5C,0,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - 
Dem,496,3020,Provisional Votes,Phyllis Hatcher,DEM,5D,0,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Provisional Votes,Phyllis Hatcher,DEM,6A,0,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Provisional Votes,Phyllis Hatcher,DEM,6B,0,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Provisional Votes,Phyllis Hatcher,DEM,6C,0,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Provisional Votes,Phyllis Hatcher,DEM,6D,0,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Provisional Votes,Phyllis Hatcher,DEM,7A,0,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Provisional Votes,Phyllis Hatcher,DEM,7B,0,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Provisional Votes,Phyllis Hatcher,DEM,7C,0,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Provisional Votes,Phyllis Hatcher,DEM,8A,0,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Provisional Votes,Phyllis Hatcher,DEM,8B,0,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Provisional Votes,Phyllis Hatcher,DEM,8C,0,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Absentee by Mail Votes,Femi Oduwole,DEM,1A,1,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Absentee by Mail Votes,Femi Oduwole,DEM,1B,0,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Absentee by Mail Votes,Femi Oduwole,DEM,1C,2,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Absentee by Mail Votes,Femi Oduwole,DEM,1D,0,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Absentee by Mail Votes,Femi Oduwole,DEM,2A,1,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Absentee by Mail Votes,Femi Oduwole,DEM,2B,3,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Absentee by Mail Votes,Femi Oduwole,DEM,3A,1,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Absentee by Mail Votes,Femi Oduwole,DEM,3B,1,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Absentee by Mail Votes,Femi Oduwole,DEM,4A,2,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Absentee by Mail Votes,Femi Oduwole,DEM,4B,1,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Absentee by Mail Votes,Femi Oduwole,DEM,5A,2,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Absentee by Mail Votes,Femi Oduwole,DEM,5B,2,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Absentee by Mail 
Votes,Femi Oduwole,DEM,5C,1,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Absentee by Mail Votes,Femi Oduwole,DEM,5D,3,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Absentee by Mail Votes,Femi Oduwole,DEM,6A,1,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Absentee by Mail Votes,Femi Oduwole,DEM,6B,3,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Absentee by Mail Votes,Femi Oduwole,DEM,6C,2,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Absentee by Mail Votes,Femi Oduwole,DEM,6D,1,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Absentee by Mail Votes,Femi Oduwole,DEM,7A,3,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Absentee by Mail Votes,Femi Oduwole,DEM,7B,0,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Absentee by Mail Votes,Femi Oduwole,DEM,7C,0,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Absentee by Mail Votes,Femi Oduwole,DEM,8A,2,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Absentee by Mail Votes,Femi Oduwole,DEM,8B,1,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Absentee by Mail Votes,Femi Oduwole,DEM,8C,3,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Advance Voting Votes,Femi Oduwole,DEM,1A,13,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Advance Voting Votes,Femi Oduwole,DEM,1B,21,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Advance Voting Votes,Femi Oduwole,DEM,1C,7,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Advance Voting Votes,Femi Oduwole,DEM,1D,15,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Advance Voting Votes,Femi Oduwole,DEM,2A,14,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Advance Voting Votes,Femi Oduwole,DEM,2B,19,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Advance Voting Votes,Femi Oduwole,DEM,3A,15,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Advance Voting Votes,Femi Oduwole,DEM,3B,23,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Advance Voting Votes,Femi Oduwole,DEM,4A,5,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Advance Voting Votes,Femi Oduwole,DEM,4B,11,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Advance Voting Votes,Femi Oduwole,DEM,5A,20,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Advance Voting Votes,Femi 
Oduwole,DEM,5B,18,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Advance Voting Votes,Femi Oduwole,DEM,5C,18,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Advance Voting Votes,Femi Oduwole,DEM,5D,16,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Advance Voting Votes,Femi Oduwole,DEM,6A,20,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Advance Voting Votes,Femi Oduwole,DEM,6B,11,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Advance Voting Votes,Femi Oduwole,DEM,6C,17,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Advance Voting Votes,Femi Oduwole,DEM,6D,6,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Advance Voting Votes,Femi Oduwole,DEM,7A,21,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Advance Voting Votes,Femi Oduwole,DEM,7B,15,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Advance Voting Votes,Femi Oduwole,DEM,7C,8,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Advance Voting Votes,Femi Oduwole,DEM,8A,19,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Advance Voting Votes,Femi Oduwole,DEM,8B,9,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Advance Voting Votes,Femi Oduwole,DEM,8C,17,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Election Day Votes,Femi Oduwole,DEM,1A,17,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Election Day Votes,Femi Oduwole,DEM,1B,30,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Election Day Votes,Femi Oduwole,DEM,1C,20,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Election Day Votes,Femi Oduwole,DEM,1D,27,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Election Day Votes,Femi Oduwole,DEM,2A,21,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Election Day Votes,Femi Oduwole,DEM,2B,34,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Election Day Votes,Femi Oduwole,DEM,3A,18,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Election Day Votes,Femi Oduwole,DEM,3B,10,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Election Day Votes,Femi Oduwole,DEM,4A,5,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Election Day Votes,Femi Oduwole,DEM,4B,28,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Election Day Votes,Femi Oduwole,DEM,5A,38,25.52,4,2022-06-13 17:52:28+00:00 
-GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Election Day Votes,Femi Oduwole,DEM,5B,25,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Election Day Votes,Femi Oduwole,DEM,5C,25,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Election Day Votes,Femi Oduwole,DEM,5D,24,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Election Day Votes,Femi Oduwole,DEM,6A,12,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Election Day Votes,Femi Oduwole,DEM,6B,20,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Election Day Votes,Femi Oduwole,DEM,6C,15,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Election Day Votes,Femi Oduwole,DEM,6D,18,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Election Day Votes,Femi Oduwole,DEM,7A,19,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Election Day Votes,Femi Oduwole,DEM,7B,25,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Election Day Votes,Femi Oduwole,DEM,7C,14,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Election Day Votes,Femi Oduwole,DEM,8A,27,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Election Day Votes,Femi Oduwole,DEM,8B,18,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Election Day Votes,Femi Oduwole,DEM,8C,27,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Provisional Votes,Femi Oduwole,DEM,1A,0,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Provisional Votes,Femi Oduwole,DEM,1B,0,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Provisional Votes,Femi Oduwole,DEM,1C,0,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Provisional Votes,Femi Oduwole,DEM,1D,0,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Provisional Votes,Femi Oduwole,DEM,2A,0,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Provisional Votes,Femi Oduwole,DEM,2B,0,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Provisional Votes,Femi Oduwole,DEM,3A,0,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Provisional Votes,Femi Oduwole,DEM,3B,0,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Provisional Votes,Femi Oduwole,DEM,4A,0,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Provisional Votes,Femi Oduwole,DEM,4B,0,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Provisional Votes,Femi 
Oduwole,DEM,5A,0,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Provisional Votes,Femi Oduwole,DEM,5B,0,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Provisional Votes,Femi Oduwole,DEM,5C,0,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Provisional Votes,Femi Oduwole,DEM,5D,0,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Provisional Votes,Femi Oduwole,DEM,6A,0,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Provisional Votes,Femi Oduwole,DEM,6B,0,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Provisional Votes,Femi Oduwole,DEM,6C,0,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Provisional Votes,Femi Oduwole,DEM,6D,0,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Provisional Votes,Femi Oduwole,DEM,7A,0,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Provisional Votes,Femi Oduwole,DEM,7B,0,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Provisional Votes,Femi Oduwole,DEM,7C,0,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Provisional Votes,Femi Oduwole,DEM,8A,0,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Provisional Votes,Femi Oduwole,DEM,8B,0,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Provisional Votes,Femi Oduwole,DEM,8C,0,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Absentee by Mail Votes,Paul Walton,DEM,1A,0,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Absentee by Mail Votes,Paul Walton,DEM,1B,6,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Absentee by Mail Votes,Paul Walton,DEM,1C,5,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Absentee by Mail Votes,Paul Walton,DEM,1D,5,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Absentee by Mail Votes,Paul Walton,DEM,2A,8,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Absentee by Mail Votes,Paul Walton,DEM,2B,4,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Absentee by Mail Votes,Paul Walton,DEM,3A,5,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Absentee by Mail Votes,Paul Walton,DEM,3B,6,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Absentee by Mail Votes,Paul Walton,DEM,4A,3,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Absentee by Mail Votes,Paul Walton,DEM,4B,2,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of 
Representatives - District 10 - Dem,725,2841,Absentee by Mail Votes,Paul Walton,DEM,5A,3,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Absentee by Mail Votes,Paul Walton,DEM,5B,2,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Absentee by Mail Votes,Paul Walton,DEM,5C,1,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Absentee by Mail Votes,Paul Walton,DEM,5D,1,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Absentee by Mail Votes,Paul Walton,DEM,6A,3,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Absentee by Mail Votes,Paul Walton,DEM,6B,5,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Absentee by Mail Votes,Paul Walton,DEM,6C,5,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Absentee by Mail Votes,Paul Walton,DEM,6D,0,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Absentee by Mail Votes,Paul Walton,DEM,7A,5,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Absentee by Mail Votes,Paul Walton,DEM,7B,3,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Absentee by Mail Votes,Paul Walton,DEM,7C,3,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Absentee by Mail Votes,Paul Walton,DEM,8A,3,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Absentee by Mail Votes,Paul Walton,DEM,8B,4,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Absentee by Mail Votes,Paul Walton,DEM,8C,3,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Advance Voting Votes,Paul Walton,DEM,1A,22,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Advance Voting Votes,Paul Walton,DEM,1B,20,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Advance Voting Votes,Paul Walton,DEM,1C,15,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Advance Voting Votes,Paul Walton,DEM,1D,28,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Advance Voting Votes,Paul Walton,DEM,2A,17,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Advance Voting Votes,Paul Walton,DEM,2B,21,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Advance Voting Votes,Paul Walton,DEM,3A,16,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Advance Voting Votes,Paul Walton,DEM,3B,7,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Advance Voting Votes,Paul Walton,DEM,4A,9,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - 
Dem,467,3387,Advance Voting Votes,Paul Walton,DEM,4B,4,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Advance Voting Votes,Paul Walton,DEM,5A,14,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Advance Voting Votes,Paul Walton,DEM,5B,17,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Advance Voting Votes,Paul Walton,DEM,5C,18,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Advance Voting Votes,Paul Walton,DEM,5D,13,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Advance Voting Votes,Paul Walton,DEM,6A,12,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Advance Voting Votes,Paul Walton,DEM,6B,27,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Advance Voting Votes,Paul Walton,DEM,6C,21,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Advance Voting Votes,Paul Walton,DEM,6D,10,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Advance Voting Votes,Paul Walton,DEM,7A,14,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Advance Voting Votes,Paul Walton,DEM,7B,20,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Advance Voting Votes,Paul Walton,DEM,7C,9,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Advance Voting Votes,Paul Walton,DEM,8A,16,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Advance Voting Votes,Paul Walton,DEM,8B,8,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Advance Voting Votes,Paul Walton,DEM,8C,15,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Election Day Votes,Paul Walton,DEM,1A,25,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Election Day Votes,Paul Walton,DEM,1B,17,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Election Day Votes,Paul Walton,DEM,1C,11,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Election Day Votes,Paul Walton,DEM,1D,28,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Election Day Votes,Paul Walton,DEM,2A,34,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Election Day Votes,Paul Walton,DEM,2B,26,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Election Day Votes,Paul Walton,DEM,3A,11,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Election Day Votes,Paul Walton,DEM,3B,11,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Election Day Votes,Paul Walton,DEM,4A,6,9.82,4,2022-06-13 
17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Election Day Votes,Paul Walton,DEM,4B,17,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Election Day Votes,Paul Walton,DEM,5A,17,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Election Day Votes,Paul Walton,DEM,5B,21,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Election Day Votes,Paul Walton,DEM,5C,18,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Election Day Votes,Paul Walton,DEM,5D,18,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Election Day Votes,Paul Walton,DEM,6A,11,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Election Day Votes,Paul Walton,DEM,6B,18,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Election Day Votes,Paul Walton,DEM,6C,15,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Election Day Votes,Paul Walton,DEM,6D,21,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Election Day Votes,Paul Walton,DEM,7A,21,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Election Day Votes,Paul Walton,DEM,7B,25,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Election Day Votes,Paul Walton,DEM,7C,14,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Election Day Votes,Paul Walton,DEM,8A,21,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Election Day Votes,Paul Walton,DEM,8B,13,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Election Day Votes,Paul Walton,DEM,8C,17,15.92,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,459,2963,Provisional Votes,Paul Walton,DEM,1A,0,15.49,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,613,3515,Provisional Votes,Paul Walton,DEM,1B,0,17.44,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,486,2249,Provisional Votes,Paul Walton,DEM,1C,0,21.61,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,668,2639,Provisional Votes,Paul Walton,DEM,1D,0,25.31,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,596,4148,Provisional Votes,Paul Walton,DEM,2A,0,14.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,736,4720,Provisional Votes,Paul Walton,DEM,2B,0,15.59,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,430,2611,Provisional Votes,Paul Walton,DEM,3A,0,16.47,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,439,3319,Provisional Votes,Paul Walton,DEM,3B,0,13.23,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,202,2056,Provisional Votes,Paul 
Walton,DEM,4A,0,9.82,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,467,3387,Provisional Votes,Paul Walton,DEM,4B,0,13.79,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,725,2841,Provisional Votes,Paul Walton,DEM,5A,0,25.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,565,2912,Provisional Votes,Paul Walton,DEM,5B,0,19.40,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,648,1886,Provisional Votes,Paul Walton,DEM,5C,0,34.36,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3020,Provisional Votes,Paul Walton,DEM,5D,0,16.42,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,480,3854,Provisional Votes,Paul Walton,DEM,6A,0,12.45,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,660,3768,Provisional Votes,Paul Walton,DEM,6B,0,17.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,496,3072,Provisional Votes,Paul Walton,DEM,6C,0,16.15,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,415,1639,Provisional Votes,Paul Walton,DEM,6D,0,25.32,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,493,2740,Provisional Votes,Paul Walton,DEM,7A,0,17.99,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,515,2965,Provisional Votes,Paul Walton,DEM,7B,0,17.37,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,387,2342,Provisional Votes,Paul Walton,DEM,7C,0,16.52,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,570,3088,Provisional Votes,Paul Walton,DEM,8A,0,18.46,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,380,2162,Provisional Votes,Paul Walton,DEM,8B,0,17.58,4,2022-06-13 17:52:28+00:00 -GA,Clarke,114759,US House of Representatives - District 10 - Dem,550,3454,Provisional Votes,Paul Walton,DEM,8C,1,15.92,4,2022-06-13 17:52:28+00:00 diff --git a/test/test_scytl/114729_summary.zip b/test/test_scytl/114729_summary.zip deleted file mode 100644 index d145a6934b..0000000000 Binary files a/test/test_scytl/114729_summary.zip and /dev/null differ diff --git a/test/test_scytl/114729_summary_expected.csv b/test/test_scytl/114729_summary_expected.csv deleted file mode 100644 index e257e74ddb..0000000000 --- a/test/test_scytl/114729_summary_expected.csv +++ /dev/null @@ -1,6 +0,0 @@ -state,county_name,office,ballots_cast,reg_voters,counties_reporting,total_counties,precincts_reporting,total_precincts,candidate_name,candidate_party,recorded_votes -GA,,US House of Representatives - District 10 - Dem (Vote For 1),42138,528466,,,18,18,Jessica Allison Fore,DEM,7261 -GA,,US House of Representatives - District 10 - Dem (Vote For 1),42138,528466,,,18,18,Tabitha Johnson-Green,DEM,15826 -GA,,US House of Representatives - District 10 - Dem (Vote For 1),42138,528466,,,18,18,Phyllis Hatcher,DEM,7118 -GA,,US House of Representatives - District 10 - Dem (Vote For 1),42138,528466,,,18,18,Femi Oduwole,DEM,4425 -GA,,US House of Representatives - District 10 - Dem (Vote For 1),42138,528466,,,18,18,Paul Walton,DEM,3080 diff --git a/test/test_scytl/GA_114729_296262_county_election_settings.json 
b/test/test_scytl/GA_114729_296262_county_election_settings.json deleted file mode 100644 index 713c485c4d..0000000000 --- a/test/test_scytl/GA_114729_296262_county_election_settings.json +++ /dev/null @@ -1,673 +0,0 @@ -{ - "settings": { - "hasvotecenters": null, - "totalprecinctsreporting": null, - "numberofprecinctsreporting": null, - "winners": {}, - "recountthreshold": 1, - "runoffthreshold": 1, - "winnerthreshold": 0, - "supportedlanguages": "en|English", - "timezoneoffset": 0, - "kmlmap": "3d36ea65-ca11-4541-a4ae-50dcbd2ec03d.json", - "isdistrictmaps": false, - "districtmaps": [], - "isdaylightsaving": true, - "precinctsreporting": {}, - "votecenters": null, - "partycolors": { - "14": "|#663399", - "10": "|#FF6688", - "8": "|#0000FF", - "9": "|#008080", - "11": "|#990055", - "2": "LIB|#99FFFF", - "3": "DEM|#0000FF", - "0": "REP|#FF0000", - "1": "NP|#FF6600", - "6": "|#FFCC33", - "7": "|#33FFFF", - "4": "|#FF0000", - "15": "|#87CEEB", - "12": "|#663300", - "13": "|#009700", - "5": "|#FF6600" - }, - "electiondetails": { - "latitude": "32.662235538822465", - "liveversionid": "-1", - "lulogid": "6322", - "prmapwidth": "400", - "usestandardcolors": "1", - "pindownloadlinks": "0", - "chkgeneratexls": true, - "showcontestdetailmapmobile": "1", - "chkgeneratesummary": true, - "showprecinctsrptg": "1", - "showareasreportingmap": "1", - "filterbyparty": "0", - "showcontestswidget": "0", - "isdistrictmaps": "0", - "showpartiallyrptheader": "0", - "nomenu": true, - "widgetlegendpartiallyreported": "1", - "issummaryupload": false, - "downloadreport": "Download report", - "filterbyprecinct": "1", - "showtotalnumbervotes": "1", - "positionresourceswidget": "13", - "registeredvoters": "0", - "showpercentageheader": "1", - "chkgeneratexlswithoutvotetypes": false, - "pinballotcounting": "0", - "showresourceswidget": "0", - "showprecinctlist": "1", - "showstateareasreportingmap": "1", - "showareasreportingmapmobile": "1", - "chkgeneratetxt": true, - "longitude": "-83.31013593750004", - "ismanualentry": "0", - "filetype": 8192, - "showstatecontestdetailmap": "1", - "showvoteresults": "1", - "isvotetypemodelunderovervotes": false, - "choicesortorder": "0", - "pinturnoutpartywidget": "0", - "pinresourceswidget": "0", - "tweetcount": "10", - "chkshowmaponvoterturnout": true, - "showprecinctsrptdetailspercontest": "0", - "showprecinctsreportingstatewidget": "1", - "internalname": "May 24, 2022 - US Congressional District 10 Primary (Dem) Recount", - "pinvoterturnoutwidget": "0", - "showregisteredvoters": "1", - "mapzoom": "7", - "coloradomapstatuses": "0", - "showturnoutvotetypeprecinctstab": "0", - "cumulativevoting": "0", - "showvotefor": "1", - "filterbydistrict": "0", - "electiondate": "6/10/2022", - "showvotetypedetails": "1", - "showdownloadreportbuttononfilterpage": "0", - "showareasrptgchart": "1", - "showballotcast": "0", - "chkgeneratepdf": false, - "filterbyvotetype": "1", - "googleanalyticsidglobal": "UA-3425620-3", - "visible": "1", - "showstraightparty": "0", - "showstateclicktoseemap": "1", - "showturnoutbyvotetype": "0", - "iscontestnochoices": false, - "chkgeneratexml": true, - "showmyfavoriteraces": "1", - "pinpostonyourwebsitewidget": "0", - "noresultsonfilters": "No results found.", - "showvotesheader": "1", - "showpostonyourwebsitewidget": "0", - "chkgeneratexmlwithoutvotetypes": false, - "truebigimagewidth": 0, - "showprintbutton": "1", - "showprecinctrptglink": "1", - "zoomlevel": "2", - "showturnoutmapmobile": "1", - "truebigimageheight": 0, - "showcompletelyrptheader": "0", - 
"showstateturnoutmap": "1", - "showvoterturnoutwidget": "0", - "showturnoutbyparty": "0", - "chkoverrideregisterdvotersonreport": false, - "showselectcounty": "1", - "nocontestavailable": "There\u2019s no contest available yet, check this category again later.", - "pintwitterwidget": "0", - "showdownloadlinks": "1", - "electionid": "114729", - "trueimageheight": 0, - "pinvotecenterswidget": "0", - "positiontwitterwidget": "12", - "showvoterturnoutlinks": "0", - "mainlanguage": "en", - "isstraightpartycontestinresults": false, - "chkoverrideballotscastonreport": false, - "scrollcarousel": "0", - "pinareasrptgchart": "0", - "googleanalyticsid": "UA-3425620-15", - "selectcounty": "Select County", - "tweetaccount": "", - "showstateaccesstocountiesmap": "1", - "showpercentrptheader": "0", - "backtostate": "back to state", - "showstatevotetypetab": "1", - "heightmap": "480", - "shownewswidget": "0", - "pinstraightparty": "0", - "parententity_internalname": "", - "cumulativechoicetext": "", - "showprecinctvotetypedetails": "1", - "isstate": true, - "showmaponvoterturnout": "0", - "showballotscast": "1", - "showadvancedreportslinks": "1", - "ballotscast": "0", - "pinturnoutbyvotetype": "0", - "showcumulativelegend": "1", - "chkisfinalupload": false, - "pincontestswidget": "0", - "showvoterturnoutbyparty": "0", - "isstateelection": "1", - "maptype": "0", - "pinnewswidget": "0", - "showpartycolors": "1", - "showprecinctspercontest": "0", - "participatingcounties": [ - "Barrow|114737|295501|6/10/2022 9:34:02 AM EDT|16", - "Clarke|114759|295888|6/13/2022 1:52:28 PM EDT|16" - ], - "showheadermessagewidget": "1", - "parentelectionid": "0", - "showvotersregisteredpercontest": "0", - "chkiscanvasupload": false, - "showtwitterwidget": "0", - "istestmode": false, - "prmapheight": "400", - "entityid": "605", - "trueimagewidth": 0, - "showvotersturnoutpercontest": "0" - }, - "dbfcolumn": 0, - "tabsystem": 8192, - "maptype": 1 - }, - "sequoiaelectiondetails": { - "registeredvoters": 0, - "ballotscast": 0 - }, - "pagesettings": { - "global settings": { - "vote for": "Vote For", - "status": "Status", - "download reports link": "Reports", - "partially reported": "Partially Reported", - "bar percentage": "", - "has not reported": "Has Not Reported", - "reporting": "Reporting", - "web site last updated": "Last updated", - "color": "Color", - "canvassed": "", - "choice": "Choice", - "submit": "Submit", - "election name": "", - "cumulative": "Cumulative", - "voter turnout": "Voter Turnout", - "of": "of", - "votes": "Votes", - "search contests": "Search", - "voter turnout link": "Voter Turnout", - "summary": "Summary", - "next": "Next", - "map": "Map", - "areas reporting link": "Precincts Reporting", - "participating": "Participating", - "area": "County", - "view full site": "View Full ENR Site", - "scripts": "", - "template header": "Month DD, YYYY Election name", - "close this window": "Close This Window", - "registered voters": "Registered Voters", - "print this page": "Print This Page", - "percent": "Percent", - "tie": "Tie", - "filters": "Filters", - "results not available": "Election results are currently not available. Please try again later.", - "mobile header": "
UNOFFICIAL RESULTS", - "select county label": "Results by County", - "select all": "Select All", - "header": "UNOFFICIAL RESULTS
", - "view enlarged map link": "View Enlarged Map", - "not participating": "Not Participating", - "detail": "Detail", - "choose language": "Choose Language  ", - "select area": "Results by County", - "areas": "Precincts", - "cancel button": "Cancel", - "go to page": "Go To Page", - "state wide": "Statewide Results", - "view all data": "View All Data", - "previous": "Previous", - "display": "Display", - "election results": "", - "mobile menu": "Menu", - "percent reported": "Percent Reported", - "top of page": "Top of Page", - "total": "Total", - "mobile template header": "County Office Name", - "ballots cast": "Ballots Cast", - "search button": "", - "completely reported": "Completely Reported", - "ballot oval selection": "", - "mobile election name": "Month DD, YYYY Election name" - }, - "summary page": { - "all": "ALL", - "recount": "Recount", - "precincts reporting": "Precincts Reporting", - "vote type legend help": "(click the types in the legend above to toggle their visibility)", - "winner": "Winner", - "vote type tabular icon": "Vote Type Tabular", - "scripts": "", - "runoff": "Runoff", - "map detail icon": "Contest Detail Map", - "over votes": "", - "maps not available": "Maps are not available", - "under votes": "", - "detail icon": "Detail", - "vote type icon": "Vote type summary", - "precincts reporting icon": "Precincts Reporting", - "header": "" - }, - "areas rptg page": { - "header": "", - "area reporting table header": "Precincts Reporting", - "maps not available": "", - "scripts": "" - }, - "map page": { - "scripts": "", - "header": "" - }, - "vote type page": { - "scripts": "", - "header": "" - }, - "download rpt page": { - "type column header": "Type", - "type txt label": "Detail TXT", - "xlsvt description": "Precinct level details for election results", - "description column header": "Description", - "csv description": "Comma separated file showing total votes received.", - "xml description": "Precinct level details for election results. Contains votes received by choice in each contest for all participating precincts.", - "type xml label": "Detail XML", - "district header": "", - "xmlvt description": "Precinct level details for election results", - "district": "", - "custom header": "Custom Filter by Vote Type and Precinct", - "contest": "", - "report title": "", - "scripts": "", - "type xls label": "Detail XLS", - "type csv label": "Summary CSV", - "size column header": "Size", - "type xlsvt label": "Detailed XLS without Vote Types", - "download header": "Download Reports", - "txt description": "Precinct level details for election results. Contains votes received by choice in each contest for all participating precincts.", - "refresh btn label": "Refresh", - "type xmlvt label": "Detailed XML without Vote Types", - "download column header": "Download", - "xls description": "Precinct level details for election results. 
Contains votes received by choice in each contest for all participating precincts.", - "header": "", - "vote type label": "Vote Type" - }, - "tv results page": { - "template header": "", - "header": "", - "scripts": "" - }, - "slide show page": { - "scripts": "", - "header": "" - }, - "voter turnout page": { - "party": "", - "precincts participating": "", - "total ballots": "", - "precincts reporting": "", - "precincts reported": "", - "voter turnout by party not available": "", - "scripts": "", - "party turnout": "", - "table header": "Voter Turnout", - "registered voters": "", - "total election turnout": "", - "voter turnout by party icon": "", - "election turnout": "", - "voter turnout icon": "", - "maps not available": "Maps are not available", - "greater than": "Greater than 100%", - "header": "" - }, - "voter turnout data page": { - "header": "", - "scripts": "", - "table header": "Voter Turnout" - }, - "map data page": { - "header": "", - "detail": "Detail", - "scripts": "" - }, - "area rptg data page": { - "scripts": "", - "header": "" - }, - "social media": { - "facebook description": "", - "facebook appid": "236723726432666", - "facebook text": "", - "facebook title": "", - "twitter title": "Check out this election " - }, - "web": { - "legend_partiallyreported_county_precinctsreporting": "Precinct is reporting results but has not completely reported", - "label_tie_state_contestdetail": "Tie", - "show": "Show", - "total": "Total", - "nocontestavailable": "There\u2019s no contest available yet, check this category again later.", - "pt_partyturnout": "Party Turnout", - "styles": "", - "rptdescriptioncsv": "Comma separated file showing total votes received.", - "hide": "Hide", - "websitelastupdated": "Last updated", - "label_partiallyreported_state_countiesreporting": "Partially Reported", - "label_notparticipating_county_turnout": "Not Participating", - "searchbutton": "Search", - "legend_notparticipating_state_contestdetail": "County is not participating in this contest", - "typexlslabel": "Detail XLS", - "precinctdatahiddenmsg": "This information has been hidden because the precinct has less than 10 votes", - "linkturnoutvotetype": "See Turnout by Vote Type", - "label_completelyreported_county_votecenter": "Completely Reported", - "csvdescription": "Comma separated file showing total votes received.", - "vcreportinglinktext": "See Vote Centers Reporting", - "summary": "Results", - "foundresults": "results were found", - "legend_greaterthan_state_turnout": "Greater Than", - "charttype": "Chart type", - "sharecontesttitle": "Share Contest", - "clicktoseemap": "Click for Contest Details", - "label_electionnightcomplete_state_countiesreporting": "Election Night Completed", - "downloadreport": "Download report", - "viewasgrid": "View as grid", - "label_countycertifiedresults_state_countiesreporting": "County Certified Results", - "votetypedatahiddenna": "N/A", - "filters": "filters", - "legend_notreporting_county_contestdetail": "Precinct is participating in the election but has not reported results", - "ballotsprocessed": "Ballots Processed", - "completelyreported": "Completely Reported", - "emaildescription": "", - "sitetitle": "", - "mctooltip": "This contest combines results", - "default": "default", - "vcreportingheader": "Vote Centers Reporting", - "voterturnoutbypartynotavailable": "Voter Turnout By Party Not Available", - "legend_tie_state_contestdetail": "Tie", - "ballotcast": "Ballots Cast", - "postonyourwebsite": "Post on your website", - "backtostate": "back to state", - 
"resultsnotavailable": "Results Not Available", - "legend_notparticipating_county_precinctsreporting": "Precinct is not participating in the election", - "xmlvtdescription": "Precinct level details for election results", - "typexmlvtlabel": "Detailed XML without Vote Types", - "statewideresults": "Statewide Results", - "titleturnoutbyvotetype": "Turnout By Vote Type", - "label_completelyreported_county_precinctsreporting": "Completely Reported", - "pt_party": "Party", - "viewastable": "View as table", - "copied": "Copied", - "label_countiesparticipating_state_selectcounty": "Counties Participating", - "pt_totalballots": "Total Ballots", - "ballotcountingheader": "Ballot Counting Progress", - "legend_completelyreported_state_countiesreporting": "County has reported results", - "mccontestboxbar": "Combined results", - "area2": "County", - "label_notreporting_county_precinctsreporting": "Not Reporting", - "resultstab": "Results", - "legend_notparticipating_state_selectcounty": "County is not participating in the election", - "label_tie_county_contestdetail": "Tie", - "county": "County", - "showcontests": "Show Contests", - "ballot_oval_selection": "Ballot Oval Selection", - "areafilter": "", - "nextcontest": "Next Contest", - "vcnotreported": "Not reported", - "legend_notreporting_county_results": "Precinct is participating in this contest but has not reported results", - "runoff": "runoff", - "xlsvtdescription": "Precinct level details for election results", - "rpttittlexml": "Detail XML", - "countiesreporting": "Counties Complete", - "facebookappid": "236723726432666", - "turnout_registered_voters": "Registered Voters", - "txtdescription": "County level details for election results. Contains votes received by choice in each contest for all participating counties.", - "reporting": "Counties Reporting", - "winningpartyfilter": "Winning Party Filter", - "participating": "Counties Participating", - "voterturnouticon": "Voter Turnout", - "pt_precinctturnouttab": "Precinct Turnout", - "label_greaterthan_county_turnout": "Greater Than", - "legend_notreporting_county_votecenter": "Vote Center is participating in the election but has not reported results", - "cumulative": "Cumulative", - "loading": "Loading...", - "mapsnotavailable": "Maps Not Available", - "votes": "Votes", - "chooselanguage": "Language", - "secondarymenu": "Secondary Menu", - "stateprecinctsreporting": "precincts complete", - "hidesidenavvoterturnoutpanel": "0", - "rptdescriptionxls": "County level details for election results. 
Contains votes received by choice in each contest for all participating precincts.", - "legend_notcompletelyreported_state_countiesreporting": "County is reporting results but has not completely reported", - "emailsubject": "", - "hideturnoutnavigation": "0", - "legend_notparticipating_county_votecenter": "Vote Center is not participating in the election", - "expandopen": "expand/open", - "add": "Add", - "scrollpagetitle": "Election name", - "vcnotparticipating": "Not Participating", - "close": "Close", - "legend_countycertifiedresults_state_countiesreporting": "County results have been validated and are official", - "legend_notparticipating_state_turnout": "County is not participating in the election", - "typecsvlabel": "Summary CSV", - "mccontestdetailbar": "Combined results", - "label_notparticipating_state_selectcounty": "Counties Not Participating", - "chartpie": "Pie", - "map": "Map", - "areasreportinglinktext": "See Counties Reporting", - "legend_notparticipating_county_results": "Precinct is not participating in this contest", - "filterresults": "Filter Results", - "label_partiallyreported_county_precinctsreporting": "Partially Reported", - "vcpartiallyreported": "Partially reported", - "votetypedatahiddenmsg": "This information has been hidden because the vote type has less than 10 votes", - "typetxtlabel": "Detail TXT", - "districtfilter": "District", - "selectcounty": "Results By County", - "turnout": "Turnout", - "legend_tie_county_contestdetail": "Tie", - "label_notparticipating_state_turnout": "Not Participating", - "legend_countiescertifiedresults_state_selectcounty": "County results have been validated and are official", - "scripts": "document.querySelector('.col-md-6.footer-left ').innerHTML = 'Copyright ' + (new Date()).getFullYear() + ' - www.scytl.us';", - "label_partiallyreported_county_votecenter": "Partially Reported", - "tie": "Tie", - "mainmenu": "Main Menu", - "voterturnoutbypartyicon": "Voter Turnout By Party", - "certified": "County Certified Results", - "label_notreporting_state_countiesreporting": "Not Reporting", - "legend_notparticipating_county_contestdetail": "Precinct is not participating in this contest", - "legend_completelyreported_county_votecenter": "Vote Center has reported results", - "legend_partiallyreported_county_votecenter": "Vote Center is reporting results but has not completely reported", - "rpttittletxt": "Detail TXT", - "addcategorytomyfavraces": "Add all contest to my favorite races", - "straightpartyvotes": "Straight Party Votes", - "filterstitle": "Filter", - "legend_notreporting_state_turnout": "County is participating in the election but has not reported results", - "xmldescription": "County level details for election results. Contains votes received by choice in each contest for all participating counties.", - "pagesubtitle": "\n\n\n\n\n\n
 
\n Election Date - May 24, 2022, Recount Start Date - June 8, 2022
", - "area": "Precinct", - "voterturnout": "Voter Turnout", - "previouscontest": "Previous Contest", - "ballotcountingdescription": "Based on estimated total ballots expected to be cast, including ballots that will be counted after Election Day. Polling place and early Vote by Mail ballots are counted on Election Night. Last-minute Vote by Mail ballots and provisional ballots are counted after Election Day. Actual number of ballots cast is not known until counting is completed.", - "electioncompleted": "Election Night Complete", - "votescast": "Vote Cast", - "download": "Download", - "label_notparticipating_county_contestdetail": "Not Participating", - "votefor": "Vote For", - "legend_countiesparticipating_state_selectcounty": "County is participating in this contest", - "legend_tie": "Tie", - "vcreporting": "Vote Centers Reported", - "label_notparticipating_county_precinctsreporting": "Not Participating", - "cancel": "Cancel", - "notreported": "Not Reporting", - "all": "All", - "ballotcountinglink": "", - "legend_notreporting_state_contestdetail": "County is participating in the election but has not reported results", - "partycandidateheader": "Party / Candidate", - "recount": "recount", - "greaterthan": "Greater than", - "footermessage": "Powered by SCYTL", - "latestnewsheader": "Latest News", - "addfavrace": "Add my favorite races", - "pt_registeredvoters": "Registered Voters", - "legend_notparticipating_state_results": "County is not participating in this contest", - "rptdescriptiontxt": "County level details for election results. Contains votes received by choice in each contest for all participating precincts.", - "noresults": "No Results", - "chart": "Chart", - "legend_notparticipating_state_countiesreporting": "County is not participating in the election", - "twittertitle": "Check out this election", - "header": "
\n\nOfficial Recount Results\n\n
\n", - "scrollpagereportingheader": "Precincts Reporting", - "facebooktext": "", - "legend_electionnightcomplete_state_countiesreporting": "County is finished reporting results for election night, but this does not indicate all results have been uploaded", - "chartdonut": "Donut", - "xlsdescription": "County level details for election results. Contains votes received by choice in each contest for all participating counties.", - "label_notreporting_state_turnout": "Not Reporting", - "downloadreportheader": "Reports", - "facebooktitle": "", - "pt_partyturnouttab": "Party Turnout", - "pt_electionturnout": "Election Turnout", - "votetypefilter": "Vote Type Filter", - "label_notreporting_county_turnout": "Not Reporting", - "precinctsreporting": "Precincts Complete", - "label_notparticipating_state_countiesreporting": "Not Participating", - "resourcesheader": "Resources", - "typexlsvtlabel": "Detailed XLS without Vote Types", - "legend_notreporting_county_turnout": "Precinct is participating in the election but has not reported results", - "noresultsonfilters": "No results found.", - "notparticipating": "Counties not participating", - "rpttittlecsv": "Summary CSV", - "mobilemenu": "Mobile Menu", - "label_notparticipating_county_votecenter": "Not Participating", - "legend_notreporting_state_results": "County is participating in this contest but has not reported results", - "label_completelyreported_state_countiesreporting": "Completely Reported", - "legend_greaterthan_county_turnout": "Greater Than", - "label_notreporting_county_contestdetail": "Not Reporting", - "scrollpagereportingwidget": "Precincts Reporting", - "label_notreporting_county_votecenter": "Not Reporting", - "typexmllabel": "Detail XML", - "vccompletelyreported": "Completely reported", - "areas": "Precincts", - "pagetitle": "
US House District 10 Democratic Primary Recount
", - "legend_notparticipating_county_turnout": "Precinct is not participating in the election", - "print": "Print", - "rpttittlexls": "Detail XLS", - "contestsnumber": "# of contests in this election:", - "votesheader": "Votes", - "label_notreporting_state_contestdetail": "Not Reporting", - "precinct": "Precinct", - "votetypestab": "Vote types", - "generalturnout": "General Turnout", - "of": "Of", - "titleturnoutbyparty": "Turnout by Party", - "nochoicesdefined": "No choices defined", - "nofavorites": "There are no favorite races selected. Return to the Results page and mark your favorite races with a star.", - "areasreportingheader": "Counties/Precincts Reporting", - "label_countycertifiedresults_state_selectcounty": "County Certified Results", - "brandname": "", - "label_notparticipating_state_contestdetail": "Not Participating", - "partiallyreported": "Partially Reported", - "rptdescriptionxml": "County level details for election results. Contains votes received by choice in each contest for all participating precincts.", - "statearea": "", - "collapseclose": "collapse/close", - "share": "Share", - "favoriterace": "favorite race", - "percentageheader": "Percentage", - "ballotscast": "Ballots Cast", - "city": "", - "home": "Home", - "voterturnoutlink": "Voter Turnout", - "myracestracker": "My Favorite Races", - "linkturnoutbyparty": "See Turnout by Party", - "chartbar": "Bar", - "editinline": "Edit in-line", - "legend_completelyreported_county_precinctsreporting": "Precinct has reported results", - "twitterwidget": "Twitter Widget", - "footertitle": "Copyright 2019 - www.scytl.us", - "remove": "Remove", - "legend_notreporting_state_countiesreporting": "County is participating in the election but has not reported results", - "legend_notreporting_county_precinctsreporting": "Precinct is participating in the election but has not reported results", - "facebookdescription": "", - "templateheader": "Georgia Secretary of State BRAD RAFFENSPERGER", - "registeredvoters": "Registered Voters", - "hasnotreported": "Has Not Reported", - "label_greaterthan_state_turnout": "Greater Than", - "select_all": "Select all" - } - }, - "websiteupdatedat": "6/14/2022 4:20:01 PM EDT", - "summaryfilesize": "440 bytes", - "detailxmlfilesize": "2.6 KB", - "detailxlsfilesize": "3.0 KB", - "detailtxtfilesize": "2.0 KB", - "stateprecinctsreporting": 160, - "stateprecinctsparticipating": 160, - "stateprecinctspartiallyreporting": 0, - "language": "en", - "templatetype": "Web", - "entityname": "GA", - "parententityname": "null", - "isdistrictmaps": false, - "isusingunderovervotes": false, - "versions": [ - "295405", - "295408", - "295419", - "295423", - "295427", - "295433", - "295436", - "295439", - "295442", - "295444", - "295447", - "295449", - "295451", - "295454", - "295456", - "295459", - "295463", - "295467", - "295469", - "295472", - "295474", - "295478", - "295480", - "295487", - "295527", - "295536", - "295545", - "295589", - "295613", - "295628", - "295631", - "295633", - "295639", - "295659", - "295799", - "296080", - "296092", - "296171", - "296216", - "296240", - "296247", - "296248", - "296250", - "296262" - ], - "ismodifiededitline": true -} \ No newline at end of file diff --git a/test/test_scytl/mock_responses/GA_Barrow_114737_295501_reports_detailxml.zip b/test/test_scytl/mock_responses/GA_Barrow_114737_295501_reports_detailxml.zip deleted file mode 100644 index ae9a10c155..0000000000 Binary files a/test/test_scytl/mock_responses/GA_Barrow_114737_295501_reports_detailxml.zip and /dev/null differ diff 
--git a/test/test_scytl/mock_responses/GA_Clarke_114759_295888_reports_detailxml.zip b/test/test_scytl/mock_responses/GA_Clarke_114759_295888_reports_detailxml.zip deleted file mode 100644 index 406ecbc6cd..0000000000 Binary files a/test/test_scytl/mock_responses/GA_Clarke_114759_295888_reports_detailxml.zip and /dev/null differ diff --git a/test/test_scytl/test_scytl.py b/test/test_scytl/test_scytl.py deleted file mode 100644 index eb0309454c..0000000000 --- a/test/test_scytl/test_scytl.py +++ /dev/null @@ -1,243 +0,0 @@ -import unittest -import os -import requests_mock -import csv -from parsons.scytl import Scytl, scytl - -TEST_STATE = "GA" -TEST_ELECTION_ID = "114729" -TEST_VERSION_NUM = "296262" - -_DIR = os.path.dirname(__file__) - - -class TestScytl(unittest.TestCase): - def setUp(self): - self.scy = Scytl(TEST_STATE, TEST_ELECTION_ID) - - self.requests_mock = requests_mock.Mocker() - - self._mock_responses(self.requests_mock) - - def tearDown(self) -> None: - self.requests_mock.stop() - - def test_get_summary_results_succeeds(self): - result = self.scy.get_summary_results() - - with open(f"{_DIR}/114729_summary_expected.csv", "r") as expected: - expectedResult = list(csv.DictReader(expected, delimiter=",")) - - for i, row in enumerate(result): - expectedResultRow = expectedResult[i] - - expectedResultRow["counties_reporting"] = ( - expectedResultRow["counties_reporting"] or None - ) - expectedResultRow["total_counties"] = ( - expectedResultRow["total_counties"] or None - ) - - self.assertDictEqual(row, expectedResultRow) - - def test_get_summary_results_skips_if_no_version_update(self): - result = self.scy.get_summary_results() - - self.assertIsNotNone(result) - - result = self.scy.get_summary_results() - - self.assertIsNone(result) - - result = self.scy.get_summary_results(True) - - self.assertIsNotNone(result) - - def test_get_detailed_results_succeeds(self): - result = self.scy.get_detailed_results() - - with open(f"{_DIR}/114729_county_expected.csv", "r") as expected: - expectedResult = list(csv.DictReader(expected, delimiter=",")) - - for i in range(len(result)): - expectedResultRow = expectedResult[i] - - expectedResultRow["recorded_votes"] = int( - expectedResultRow["recorded_votes"] - ) - expectedResultRow[ - "timestamp_last_updated" - ] = self.scy._parse_date_to_utc( - expectedResultRow["timestamp_last_updated"] - ) - - self.assertDictEqual(result[i], expectedResultRow) - - def test_get_detailed_results_skips_if_no_version_update(self): - result = self.scy.get_detailed_results() - - self.assertIsNotNone(result) - - result = self.scy.get_detailed_results() - - self.assertIsNone(result) - - result = self.scy.get_detailed_results(True) - - self.assertIsNotNone(result) - - def test_get_detailed_results_for_participating_counties_succeeds(self): - _, result = self.scy.get_detailed_results_for_participating_counties() - - with open(f"{_DIR}/114729_precinct_expected.csv", "r") as expected: - expectedResult = list(csv.DictReader(expected, delimiter=",")) - - for i in range(len(result)): - expectedResultRow = expectedResult[i] - - expectedResultRow["recorded_votes"] = int( - expectedResultRow["recorded_votes"] - ) - expectedResultRow[ - "timestamp_last_updated" - ] = self.scy._parse_date_to_utc( - expectedResultRow["timestamp_last_updated"] - ) - - self.assertDictEqual(result[i], expectedResultRow) - - def test_get_detailed_results_for_participating_counties_succeeds_for_two_counties( - self, - ): - counties = ["Barrow", "Clarke"] - - _, result = 
self.scy.get_detailed_results_for_participating_counties( - county_names=counties - ) - - with open(f"{_DIR}/114729_precinct_expected.csv", "r") as expected: - expectedResult = csv.DictReader(expected, delimiter=",") - - filteredExpectedResults = list( - filter(lambda x: x["county_name"] in counties, expectedResult) - ) - - for i, row in enumerate(result): - expectedResultRow = filteredExpectedResults[i] - - expectedResultRow["recorded_votes"] = int( - expectedResultRow["recorded_votes"] - ) - expectedResultRow[ - "timestamp_last_updated" - ] = self.scy._parse_date_to_utc( - expectedResultRow["timestamp_last_updated"] - ) - - self.assertDictEqual(row, expectedResultRow) - - def test_get_detailed_results_for_participating_counties_missing_counties_update( - self, - ): - counties = ["Barrow"] - - _, result = self.scy.get_detailed_results_for_participating_counties( - county_names=counties - ) - - self.assertNotEqual(result, []) - - self.scy.previous_county_details_version_num = None - - _, result = self.scy.get_detailed_results_for_participating_counties() - - self.assertNotEqual(result, []) - - self.assertTrue(all(x["county_name"] not in counties for x in result)) - - def test_get_detailed_results_for_participating_counties_skips_if_no_version_update( - self, - ): - _, result = self.scy.get_detailed_results_for_participating_counties() - - self.assertNotEqual(result, []) - - _, result = self.scy.get_detailed_results_for_participating_counties() - - self.assertEqual(result, []) - - _, result = self.scy.get_detailed_results_for_participating_counties( - force_update=True - ) - - self.assertNotEqual(result, []) - - def test_get_detailed_results_for_participating_counties_skips_if_no_county_version_update( - self, - ): - _, result = self.scy.get_detailed_results_for_participating_counties() - - self.assertNotEqual(result, []) - - self.scy.previous_county_details_version_num = None - - _, result = self.scy.get_detailed_results_for_participating_counties() - - self.assertEqual(result, []) - - def test_get_detailed_results_for_participating_counties_repeats_failed_counties( - self, - ): - _, result = self.scy.get_detailed_results_for_participating_counties() - - self.assertNotEqual(result, []) - - self.scy.previous_county_details_version_num = None - self.scy.previously_fetched_counties.remove("Barrow") - - _, result = self.scy.get_detailed_results_for_participating_counties() - - self.assertNotEqual(result, []) - - def _mock_responses(self, m: requests_mock.Mocker): - mock_current_version_url = scytl.CURRENT_VERSION_URL_TEMPLATE.format( - administrator=TEST_STATE, election_id=TEST_ELECTION_ID - ) - - m.get(mock_current_version_url, text=TEST_VERSION_NUM) - - mock_election_settings_url = scytl.ELECTION_SETTINGS_JSON_URL_TEMPLATE.format( - state=TEST_STATE, election_id=TEST_ELECTION_ID, version_num=TEST_VERSION_NUM - ) - - with open( - f"{_DIR}/GA_114729_296262_county_election_settings.json", "r" - ) as details_file: - m.get(mock_election_settings_url, text=details_file.read()) - - for file in os.listdir(f"{_DIR}/mock_responses"): - with open(f"{_DIR}/mock_responses/{file}", "rb") as details_file: - file_url = f"https://results.enr.clarityelections.com/{file}".replace( - "_", "/" - ) - m.get(file_url, content=details_file.read()) - - mock_summary_csv_url = scytl.SUMMARY_CSV_ZIP_URL_TEMPLATE.format( - administrator=TEST_STATE, - election_id=TEST_ELECTION_ID, - version_num=TEST_VERSION_NUM, - ) - - with open(f"{_DIR}/114729_summary.zip", "rb") as summary: - m.get(mock_summary_csv_url, 
content=summary.read()) - - mock_detail_xml_url = scytl.DETAIL_XML_ZIP_URL_TEMPLATE.format( - administrator=TEST_STATE, - election_id=TEST_ELECTION_ID, - version_num=TEST_VERSION_NUM, - ) - - with open(f"{_DIR}/114729_detailxml.zip", "rb") as detailxml: - m.get(mock_detail_xml_url, content=detailxml.read()) - - m.start() diff --git a/test/test_sendmail.py b/test/test_sendmail.py deleted file mode 100644 index 465b8cc7c0..0000000000 --- a/test/test_sendmail.py +++ /dev/null @@ -1,192 +0,0 @@ -import io -import pytest -from email.mime.text import MIMEText -from email.mime.multipart import MIMEMultipart -from email.mime.image import MIMEImage -from email.mime.audio import MIMEAudio -from email.mime.application import MIMEApplication -from email.mime.base import MIMEBase - -from parsons.notifications.sendmail import EmptyListError, SendMail - - -@pytest.fixture(scope="function") -def dummy_sendmail(): - """Have to create a dummy class that inherits from SendMail and defines a couple - of methods in order to test out the methods that aren't abstract. - """ - - class DummySendMail(SendMail): - def __init__(self): - pass - - def _send_message(self, message): - pass - - return DummySendMail() - - -class TestSendMailCreateMessageSimple: - def test_creates_mimetext_message(self, dummy_sendmail): - message = dummy_sendmail._create_message_simple("from", "to", "subject", "text") - assert isinstance(message, MIMEText) - - def test_message_contents_set_appropriately(self, dummy_sendmail): - message = dummy_sendmail._create_message_simple("from", "to", "subject", "text") - assert message.get("from") == "from" - assert message.get("to") == "to" - assert message.get("subject") == "subject" - assert message.get_payload() == "text" - - -class TestSendMailCreateMessageHtml: - def test_creates_multipart_message(self, dummy_sendmail): - message = dummy_sendmail._create_message_html( - "from", "to", "subject", "text", "html" - ) - assert isinstance(message, MIMEMultipart) - - def test_sets_to_from_subject(self, dummy_sendmail): - message = dummy_sendmail._create_message_html( - "from", "to", "subject", "text", "html" - ) - assert message.get("from") == "from" - assert message.get("to") == "to" - assert message.get("subject") == "subject" - - def test_works_if_no_message_text(self, dummy_sendmail): - message = dummy_sendmail._create_message_html( - "from", "to", "subject", None, "html" - ) - assert len(message.get_payload()) == 1 - assert message.get_payload()[0].get_payload() == "html" - assert message.get_payload()[0].get_content_type() == "text/html" - - def test_works_with_text_and_html(self, dummy_sendmail): - message = dummy_sendmail._create_message_html( - "from", "to", "subject", "text", "html" - ) - assert len(message.get_payload()) == 2 - assert message.get_payload()[0].get_payload() == "text" - assert message.get_payload()[0].get_content_type() == "text/plain" - assert message.get_payload()[1].get_payload() == "html" - assert message.get_payload()[1].get_content_type() == "text/html" - - -class TestSendMailCreateMessageAttachments: - def test_creates_multipart_message(self, dummy_sendmail): - message = dummy_sendmail._create_message_attachments( - "from", "to", "subject", "text", [] - ) - assert isinstance(message, MIMEMultipart) - - def test_can_handle_html(self, dummy_sendmail): - message = dummy_sendmail._create_message_attachments( - "from", "to", "subject", "text", [], message_html="html" - ) - assert len(message.get_payload()) == 2 - assert message.get_payload()[0].get_payload() == "text" - 
assert message.get_payload()[0].get_content_type() == "text/plain" - assert message.get_payload()[1].get_payload() == "html" - assert message.get_payload()[1].get_content_type() == "text/html" - - @pytest.mark.parametrize( - "filename,expected_type", - [ - ("image.png", MIMEImage), - ("application.exe", MIMEApplication), - ("text.txt", MIMEText), - ("audio.mp3", MIMEAudio), - ( - "video.mp4", - MIMEBase, - ), # This will fail if the method is updated to parse video - ], - ) - def test_properly_detects_file_types( - self, tmp_path, dummy_sendmail, filename, expected_type - ): - filename = tmp_path / filename - filename.write_bytes(b"Parsons") - message = dummy_sendmail._create_message_attachments( - "from", "to", "subject", "text", [filename] - ) - assert len(message.get_payload()) == 2 # text body plus attachment - assert isinstance(message.get_payload()[1], expected_type) - - @pytest.mark.parametrize("buffer", [io.StringIO, io.BytesIO]) - def test_works_with_buffers(self, dummy_sendmail, buffer): - value = "Parsons" - if buffer is io.BytesIO: - value = b"Parsons" - message = dummy_sendmail._create_message_attachments( - "from", "to", "subject", "text", [buffer(value)] - ) - assert len(message.get_payload()) == 2 # text body plus attachment - assert isinstance(message.get_payload()[1], MIMEApplication) - - -class TestSendMailValidateEmailString: - @pytest.mark.parametrize("bad_email", ["a", "a@", "a+b", "@b.com"]) - def test_errors_with_invalid_emails(self, dummy_sendmail, bad_email): - with pytest.raises(ValueError): - dummy_sendmail._validate_email_string(bad_email) - - @pytest.mark.parametrize("good_email", ["a@b", "a+b@c", "a@d.com", "a@b.org"]) - def test_passes_valid_emails(self, dummy_sendmail, good_email): - dummy_sendmail._validate_email_string(good_email) - - -class TestSendMailSendEmail: - @pytest.fixture(scope="function") - def patched_sendmail(self): - class PatchedSendMail(SendMail): - def __init__(self): - pass - - def _send_message(self, message): - self.message = message # Stores message for post-call introspection - - return PatchedSendMail() - - def test_errors_when_send_message_not_implemented(self): - with pytest.raises( - TypeError, match="Can't instantiate abstract class SendMail" - ): - SendMail().send_email("from@from.com", "to@to.com", "subject", "text") - - def test_can_handle_lists_of_emails(self, patched_sendmail): - patched_sendmail.send_email( - "from", ["to1@to1.com", "to2@to2.com"], "subject", "text" - ) - assert patched_sendmail.message.get("to") == "to1@to1.com, to2@to2.com" - - def test_errors_if_an_email_in_a_list_doesnt_validate(self, patched_sendmail): - with pytest.raises(ValueError, match="Invalid email address"): - patched_sendmail.send_email( - "from", ["to1@to1.com", "invalid", "to2@to2.com"], "subject", "text" - ) - - def test_errors_if_no_to_email_is_specified(self, patched_sendmail): - with pytest.raises(EmptyListError, match="Must contain at least 1 email"): - patched_sendmail.send_email("from", [], "subject", "text") - - def test_appropriately_dispatches_html_email(self, patched_sendmail): - patched_sendmail.send_email( - "from", "to@to.com", "subject", "text", message_html="html" - ) - assert len(patched_sendmail.message.get_payload()) == 2 - assert ( - patched_sendmail.message.get_payload()[1].get_content_type() == "text/html" - ) - - def test_appropriately_handles_filename_specified_as_string( - self, tmp_path, patched_sendmail - ): - filename = tmp_path / "test.txt" - filename.write_bytes(b"Parsons") - patched_sendmail.send_email( - 
"from", "to@to.com", "subject", "text", files=str(filename) - ) - assert len(patched_sendmail.message.get_payload()) == 2 - assert isinstance(patched_sendmail.message.get_payload()[1], MIMEText) diff --git a/test/test_sftp.py b/test/test_sftp.py index 09a01ba0af..3109756878 100644 --- a/test/test_sftp.py +++ b/test/test_sftp.py @@ -4,33 +4,23 @@ from contextlib import contextmanager from copy import deepcopy from unittest.mock import MagicMock, patch, call -from parsons import Table, SFTP +from parsons.etl import Table +from parsons.sftp import SFTP from parsons.utilities import files as file_util from test.utils import mark_live_test, assert_matching_tables -from test.fixtures import ( # noqa: F401 - simple_table, - simple_csv_path, - simple_compressed_csv_path, -) +from test.fixtures import simple_table, simple_csv_path, simple_compressed_csv_path # noqa; F401 # # Fixtures and constants # REMOTE_DIR, CSV, COMPRESSED_CSV, EMPTY, SUBDIR_A, SUBDIR_B, CSV_A, CSV_B = [ - "parsons_test", - "test.csv", - "test.csv.gz", - "empty", - "subdir_a", - "subdir_b", - "test_a.csv", - "test_b.csv", + 'parsons_test', 'test.csv', 'test.csv.gz', 'empty', 'subdir_a', 'subdir_b', 'test_a.csv', + 'test_b.csv' ] CSV_PATH, COMPRESSED_CSV_PATH, EMPTY_PATH, SUBDIR_A_PATH, SUBDIR_B_PATH = [ - f"{REMOTE_DIR}/{content}" - for content in (CSV, COMPRESSED_CSV, EMPTY, SUBDIR_A, SUBDIR_B) + f"{REMOTE_DIR}/{content}" for content in (CSV, COMPRESSED_CSV, EMPTY, SUBDIR_A, SUBDIR_B) ] CSV_A_PATH, CSV_B_PATH = [ @@ -47,7 +37,7 @@ def sup(sftp, simple_csv_path, simple_compressed_csv_path): # noqa: F811 sftp.make_directory(remote_dir) for remote_file in FILE_PATHS: - fixture = simple_compressed_csv_path if "gz" in remote_file else simple_csv_path + fixture = simple_compressed_csv_path if 'gz' in remote_file else simple_csv_path sftp.put_file(fixture, remote_file) @@ -59,9 +49,9 @@ def cleanup(sftp): def generate_live_sftp_connection(): - host = os.environ["SFTP_HOST"] - username = os.environ["SFTP_USERNAME"] - password = os.environ["SFTP_PASSWORD"] + host = os.environ['SFTP_HOST'] + username = os.environ['SFTP_USERNAME'] + password = os.environ['SFTP_PASSWORD'] return SFTP(host, username, password) @@ -76,14 +66,12 @@ def live_sftp(simple_csv_path, simple_compressed_csv_path, simple_table): # noq # This second live_sftp fixture is used for test_get_files so that files are never downloaded and # mocks can be inspected. @pytest.fixture -def live_sftp_with_mocked_get( - simple_csv_path, simple_compressed_csv_path # noqa: F811 -): +def live_sftp_with_mocked_get(simple_csv_path, simple_compressed_csv_path): # noqa: F811 SFTP_with_mocked_get = deepcopy(SFTP) # The names of temp files are long arbitrary strings. This makes them predictable. 
def rv(magic_mock): - return ["foo", "bar", "baz"][magic_mock.call_count] + return ['foo', 'bar', 'baz'][magic_mock.call_count] get = MagicMock() create_temp_file_for_path = MagicMock() @@ -128,7 +116,6 @@ def get_file(self, remote_path, local_path=None, connection=None): cleanup(sftp) - # # Tests # @@ -139,13 +126,13 @@ def test_credential_validation(): SFTP(host=None, username=None, password=None) with pytest.raises(ValueError): - SFTP(host=None, username="sam", password="abc123") + SFTP(host=None, username='sam', password='abc123') @mark_live_test def test_list_non_existent_directory(live_sftp): with pytest.raises(FileNotFoundError): - live_sftp.list_directory("abc123") + live_sftp.list_directory('abc123') @mark_live_test @@ -157,7 +144,7 @@ def test_list_directory_with_files(live_sftp): @mark_live_test def test_get_non_existent_file(live_sftp): with pytest.raises(FileNotFoundError): - live_sftp.get_file("abc123") + live_sftp.get_file('abc123') # Helper function @@ -187,17 +174,15 @@ def test_get_temp_file(live_sftp, simple_table): # noqa F811 @mark_live_test -@pytest.mark.parametrize("compression", [None, "gzip"]) +@pytest.mark.parametrize('compression', [None, 'gzip']) def test_table_to_sftp_csv(live_sftp, simple_table, compression): # noqa F811 - host = os.environ["SFTP_HOST"] - username = os.environ["SFTP_USERNAME"] - password = os.environ["SFTP_PASSWORD"] - remote_path = f"{REMOTE_DIR}/test_to_sftp.csv" - if compression == "gzip": - remote_path += ".gz" - simple_table.to_sftp_csv( - remote_path, host, username, password, compression=compression - ) + host = os.environ['SFTP_HOST'] + username = os.environ['SFTP_USERNAME'] + password = os.environ['SFTP_PASSWORD'] + remote_path = f'{REMOTE_DIR}/test_to_sftp.csv' + if compression == 'gzip': + remote_path += '.gz' + simple_table.to_sftp_csv(remote_path, host, username, password, compression=compression) local_path = live_sftp.get_file(remote_path) assert_file_matches_table(local_path, simple_table) @@ -205,7 +190,6 @@ def test_table_to_sftp_csv(live_sftp, simple_table, compression): # noqa F811 # Cleanup live_sftp.remove_file(remote_path) - # # Helper Functions # @@ -233,7 +217,7 @@ def test_list_files(live_sftp): @mark_live_test def test_list_files_with_pattern(live_sftp): - result = live_sftp.list_files(REMOTE_DIR, pattern="gz") + result = live_sftp.list_files(REMOTE_DIR, pattern='gz') assert result == [COMPRESSED_CSV_PATH] @@ -245,57 +229,50 @@ def test_list_subdirectories(live_sftp): @mark_live_test def test_list_subdirectories_with_pattern(live_sftp): - result = sorted(live_sftp.list_subdirectories(REMOTE_DIR, pattern="sub")) + result = sorted(live_sftp.list_subdirectories(REMOTE_DIR, pattern='sub')) assert result == [SUBDIR_A_PATH, SUBDIR_B_PATH] -local_paths = ["foo", "bar"] +local_paths = ['foo', 'bar'] # The following are values for the arguments to pass to `get_files` and `walk_tree` as well as the # strings expected to be found in the returned results. 
args_and_expected = { - "get_files": [ - ({"remote": REMOTE_DIR}, [CSV_PATH, COMPRESSED_CSV_PATH]), - ({"remote": [SUBDIR_A_PATH, SUBDIR_B_PATH]}, [CSV_B_PATH, CSV_A_PATH]), - ( - {"remote": SUBDIR_B_PATH, "files_to_download": [CSV_B_PATH]}, - [CSV_A_PATH, CSV_B_PATH], - ), - ({"remote": [SUBDIR_A_PATH, SUBDIR_B_PATH], "pattern": "a"}, [CSV_B_PATH]), + 'get_files': [ + ({'remote': REMOTE_DIR}, [CSV_PATH, COMPRESSED_CSV_PATH]), + ({'remote': [SUBDIR_A_PATH, SUBDIR_B_PATH]}, [CSV_B_PATH, CSV_A_PATH]), + ({'remote': SUBDIR_B_PATH, 'files_to_download': [CSV_B_PATH]}, [CSV_A_PATH, CSV_B_PATH]), + ({'remote': [SUBDIR_A_PATH, SUBDIR_B_PATH], 'pattern': 'a'}, [CSV_B_PATH]) ], - "walk_tree": [ + 'walk_tree': [ ( [REMOTE_DIR], - {"download": False, "dir_pattern": SUBDIR_A}, - [[SUBDIR_A], [COMPRESSED_CSV, CSV, CSV_B]], + {'download': False, 'dir_pattern': SUBDIR_A}, + [[SUBDIR_A], [COMPRESSED_CSV, CSV, CSV_B]] ), ( [REMOTE_DIR], - {"download": False, "file_pattern": CSV_B}, - [[SUBDIR_A, SUBDIR_B, EMPTY], [CSV_B]], + {'download': False, 'file_pattern': CSV_B}, + [[SUBDIR_A, SUBDIR_B, EMPTY], [CSV_B]] ), ( [REMOTE_DIR], - {"download": False, "dir_pattern": SUBDIR_A, "file_pattern": CSV_B}, - [[SUBDIR_A], [CSV_B]], + {'download': False, 'dir_pattern': SUBDIR_A, 'file_pattern': CSV_B}, + [[SUBDIR_A], [CSV_B]] ), ( [REMOTE_DIR], - {"download": False, "max_depth": 1}, - [[EMPTY, SUBDIR_A, SUBDIR_B], [CSV, COMPRESSED_CSV]], - ), - ], + {'download': False, 'max_depth': 1}, + [[EMPTY, SUBDIR_A, SUBDIR_B], [CSV, COMPRESSED_CSV]] + ) + ] } @mark_live_test -def test_get_files_calls_get_to_write_to_provided_local_paths( - live_sftp_with_mocked_get, -): +def test_get_files_calls_get_to_write_to_provided_local_paths(live_sftp_with_mocked_get): live_sftp, get = live_sftp_with_mocked_get - results = live_sftp.get_files( - remote=[SUBDIR_A_PATH, SUBDIR_B_PATH], local_paths=local_paths - ) + results = live_sftp.get_files(remote=[SUBDIR_A_PATH, SUBDIR_B_PATH], local_paths=local_paths) assert get.call_count == 2 calls = [call(CSV_A_PATH, local_paths[0]), call(CSV_B_PATH, local_paths[1])] assert_has_calls(get, calls) @@ -303,10 +280,8 @@ def test_get_files_calls_get_to_write_to_provided_local_paths( @mark_live_test -@pytest.mark.parametrize("kwargs,expected", args_and_expected["get_files"]) -def test_get_files_calls_get_to_write_temp_files( - kwargs, expected, live_sftp_with_mocked_get -): +@pytest.mark.parametrize('kwargs,expected', args_and_expected['get_files']) +def test_get_files_calls_get_to_write_temp_files(kwargs, expected, live_sftp_with_mocked_get): live_sftp, get = live_sftp_with_mocked_get live_sftp.get_files(**kwargs) assert get.call_count == len(expected) @@ -321,14 +296,14 @@ def test_get_files_raises_error_when_no_file_source_is_provided(live_sftp): @mark_live_test -@patch("parsons.sftp.SFTP.get_file") +@patch('parsons.sftp.SFTP.get_file') def test_get_files_with_files_paths_mismatch(get_file, live_sftp): live_sftp.get_files(files_to_download=[CSV_A_PATH], local_paths=local_paths) - assert get_file.call_args[1]["local_path"] is None + assert get_file.call_args[1]['local_path'] is None @mark_live_test -@pytest.mark.parametrize("args,kwargs,expected", args_and_expected["walk_tree"]) +@pytest.mark.parametrize('args,kwargs,expected', args_and_expected['walk_tree']) def test_walk_tree(args, kwargs, expected, live_sftp_with_mocked_get): live_sftp, get = live_sftp_with_mocked_get results = live_sftp.walk_tree(*args, **kwargs) @@ -336,7 +311,6 @@ def test_walk_tree(args, kwargs, expected, 
live_sftp_with_mocked_get): for res, expect in zip(results, expected): assert_results_match_expected(expect, res) - # Stuff that is tested by the live_sftp fixture, so no need to test explicitly: # test_make_directory # test_put_file diff --git a/test/test_sftp_ssh.py b/test/test_sftp_ssh.py index 298e8ed8a3..599f2c5681 100644 --- a/test/test_sftp_ssh.py +++ b/test/test_sftp_ssh.py @@ -1,33 +1,29 @@ import pytest import os -from parsons import Table, SFTP +from parsons.etl import Table +from parsons.sftp import SFTP from parsons.utilities import files from test.utils import mark_live_test, assert_matching_tables -from test.fixtures import ( # noqa: F401 - simple_table, - simple_csv_path, - simple_compressed_csv_path, -) - +from test.fixtures import simple_table, simple_csv_path, simple_compressed_csv_path # noqa: F401 # # Fixtures and constants # -REMOTE_DIR = "parsons-test" -REMOTE_CSV = "test.csv" -REMOTE_CSV_PATH = f"{REMOTE_DIR}/{REMOTE_CSV}" -REMOTE_COMPRESSED_CSV = "test.csv.gz" -REMOTE_COMPRESSED_CSV_PATH = f"{REMOTE_DIR}/{REMOTE_COMPRESSED_CSV}" +REMOTE_DIR = 'parsons-test' +REMOTE_CSV = 'test.csv' +REMOTE_CSV_PATH = f'{REMOTE_DIR}/{REMOTE_CSV}' +REMOTE_COMPRESSED_CSV = 'test.csv.gz' +REMOTE_COMPRESSED_CSV_PATH = f'{REMOTE_DIR}/{REMOTE_COMPRESSED_CSV}' @pytest.fixture def live_sftp(simple_table, simple_csv_path, simple_compressed_csv_path): # noqa: F811 # Generate a live SFTP connection based on these env vars - host = os.environ["SFTP_HOST"] - username = os.environ["SFTP_USERNAME"] + host = os.environ['SFTP_HOST'] + username = os.environ['SFTP_USERNAME'] password = None - rsa_private_key_file = os.environ["SFTP_RSA_PRIVATE_KEY_FILE"] + rsa_private_key_file = os.environ['SFTP_RSA_PRIVATE_KEY_FILE'] sftp = SFTP(host, username, password, rsa_private_key_file=rsa_private_key_file) @@ -44,7 +40,6 @@ def live_sftp(simple_table, simple_csv_path, simple_compressed_csv_path): # noq sftp.remove_file(REMOTE_COMPRESSED_CSV_PATH) sftp.remove_directory(REMOTE_DIR) - # # Tests # @@ -56,27 +51,19 @@ def test_credential_validation(): with pytest.raises(ValueError): SFTP( - host=None, - username="sam", - password="abc123", - rsa_private_key_file="/path/to/key/file", - ) + host=None, username='sam', password='abc123', rsa_private_key_file='/path/to/key/file') with pytest.raises(ValueError): SFTP( - host="host", - username=None, - password="abc123", - rsa_private_key_file="/path/to/key/file", - ) + host='host', username=None, password='abc123', rsa_private_key_file='/path/to/key/file') with pytest.raises(ValueError): - SFTP(host="host", username="sam", password=None, rsa_private_key_file=None) + SFTP(host='host', username='sam', password=None, rsa_private_key_file=None) @mark_live_test def test_list_non_existent_directory(live_sftp): - file_list = live_sftp.list_directory("abc123") + file_list = live_sftp.list_directory('abc123') assert len(file_list) == 0 @@ -91,7 +78,7 @@ def test_list_directory_with_files(live_sftp): @mark_live_test def test_get_non_existent_file(live_sftp): with pytest.raises(FileNotFoundError): - live_sftp.get_file("abc123") + live_sftp.get_file('abc123') # Helper function @@ -114,25 +101,21 @@ def test_get_temp_file(live_sftp, simple_table): # noqa: F811 @mark_live_test -@pytest.mark.parametrize("compression", [None, "gzip"]) +@pytest.mark.parametrize('compression', [None, 'gzip']) def test_table_to_sftp_csv(live_sftp, simple_table, compression): # noqa: F811 - host = os.environ["SFTP_HOST"] - username = os.environ["SFTP_USERNAME"] - password = os.environ["SFTP_PASSWORD"] - 
rsa_private_key_file = os.environ["SFTP_RSA_PRIVATE_KEY_FILE"] + host = os.environ['SFTP_HOST'] + username = os.environ['SFTP_USERNAME'] + password = os.environ['SFTP_PASSWORD'] + rsa_private_key_file = os.environ['SFTP_RSA_PRIVATE_KEY_FILE'] - remote_path = f"{REMOTE_DIR}/test_to_sftp.csv" - if compression == "gzip": - remote_path += ".gz" + remote_path = f'{REMOTE_DIR}/test_to_sftp.csv' + if compression == 'gzip': + remote_path += '.gz' simple_table.to_sftp_csv( - remote_path, - host, - username, - password, + remote_path, host, username, password, rsa_private_key_file=rsa_private_key_file, - compression=compression, - ) + compression=compression) local_path = live_sftp.get_file(remote_path) assert_file_matches_table(local_path, simple_table) @@ -142,26 +125,20 @@ def test_table_to_sftp_csv(live_sftp, simple_table, compression): # noqa: F811 @mark_live_test -@pytest.mark.parametrize("compression", [None, "gzip"]) -def test_table_to_sftp_csv_no_password( - live_sftp, simple_table, compression # noqa: F811 -): - host = os.environ.get("SFTP_HOST") - username = os.environ.get("SFTP_USERNAME") - rsa_private_key_file = os.environ.get("SFTP_RSA_PRIVATE_KEY_FILE") - - remote_path = f"{REMOTE_DIR}/test_to_sftp.csv" - if compression == "gzip": - remote_path += ".gz" +@pytest.mark.parametrize('compression', [None, 'gzip']) +def test_table_to_sftp_csv_no_password(live_sftp, simple_table, compression): # noqa: F811 + host = os.environ.get('SFTP_HOST') + username = os.environ.get('SFTP_USERNAME') + rsa_private_key_file = os.environ.get('SFTP_RSA_PRIVATE_KEY_FILE') + + remote_path = f'{REMOTE_DIR}/test_to_sftp.csv' + if compression == 'gzip': + remote_path += '.gz' simple_table.to_sftp_csv( - remote_path, - host, - username, - None, + remote_path, host, username, None, rsa_private_key_file=rsa_private_key_file, - compression=compression, - ) + compression=compression) local_path = live_sftp.get_file(remote_path) assert_file_matches_table(local_path, simple_table) diff --git a/test/test_shopify.py b/test/test_shopify.py index 87601794d0..4050b05889 100644 --- a/test/test_shopify.py +++ b/test/test_shopify.py @@ -1,158 +1,117 @@ -from parsons import Table, Shopify +from parsons.etl.table import Table +from parsons.shopify.shopify import Shopify from test.utils import assert_matching_tables import requests_mock import unittest -SUBDOMAIN = "myorg" -PASSWORD = "abc123" -API_KEY = "abc123" -API_VERSION = "2020-10" +SUBDOMAIN = 'myorg' +PASSWORD = 'abc123' +API_KEY = 'abc123' +API_VERSION = '2020-10' class TestShopify(unittest.TestCase): - mock_count_all = {"count": 2} - mock_count_date = mock_count_since = {"count": 1} - mock_graphql = {"data": {"orders": {"edges": [{"node": {"id": 1}}]}}} + mock_count_all = { + 'count': 2 + } + mock_count_date = mock_count_since = { + 'count': 1 + } + mock_graphql = { + 'data': { + 'orders': { + 'edges': [{ + 'node': { + 'id': 1 + } + }] + } + } + } mock_orders_all = { - "orders": [ - { - "created_at": "2020-10-19T12:00:00-04:00", - "financial_status": "paid", - "id": 1, - }, - { - "created_at": "2020-10-20T12:00:00-04:00", - "financial_status": "refunded", - "id": 2, - }, - ] + 'orders': [{ + 'created_at': '2020-10-19T12:00:00-04:00', + 'financial_status': 'paid', + 'id': 1 + }, { + 'created_at': '2020-10-20T12:00:00-04:00', + 'financial_status': 'refunded', + 'id': 2 + }] } mock_orders_completed = { - "orders": [ - { - "created_at": "2020-10-19T12:00:00-04:00", - "financial_status": "paid", - "id": 1, - } - ] + 'orders': [{ + 'created_at': '2020-10-19T12:00:00-04:00', + 
'financial_status': 'paid', + 'id': 1 + }] } mock_orders_date = mock_orders_since = { - "orders": [ - { - "created_at": "2020-10-20T12:00:00-04:00", - "financial_status": "refunded", - "id": 2, - } - ] + 'orders': [{ + 'created_at': '2020-10-20T12:00:00-04:00', + 'financial_status': 'refunded', + 'id': 2 + }] } - mock_result_all = Table( - [ - ("created_at", "financial_status", "id"), - ("2020-10-19T12:00:00-04:00", "paid", 1), - ("2020-10-20T12:00:00-04:00", "refunded", 2), - ] - ) - mock_result_completed = Table( - [ - ("created_at", "financial_status", "id"), - ("2020-10-19T12:00:00-04:00", "paid", 1), - ] - ) - mock_result_date = mock_result_since = Table( - [ - ("created_at", "financial_status", "id"), - ("2020-10-20T12:00:00-04:00", "refunded", 2), - ] - ) + mock_result_all = Table([('created_at', 'financial_status', 'id'), + ('2020-10-19T12:00:00-04:00', 'paid', 1), ('2020-10-20T12:00:00-04:00', + 'refunded', 2)]) + mock_result_completed = Table([('created_at', 'financial_status', 'id'), + ('2020-10-19T12:00:00-04:00', 'paid', 1)]) + mock_result_date = mock_result_since = Table([('created_at', 'financial_status', 'id'), + ('2020-10-20T12:00:00-04:00', 'refunded', 2)]) def setUp(self): self.shopify = Shopify(SUBDOMAIN, PASSWORD, API_KEY, API_VERSION) @requests_mock.Mocker() def test_get_count(self, m): - m.get( - self.shopify.get_query_url(None, None, "orders", True), - json=self.mock_count_all, - ) - m.get( - self.shopify.get_query_url("2020-10-20", None, "orders", True), - json=self.mock_count_date, - ) - m.get( - self.shopify.get_query_url(None, 2, "orders", True), - json=self.mock_count_since, - ) + m.get(self.shopify.get_query_url(None, None, "orders", True), json=self.mock_count_all) + m.get(self.shopify.get_query_url('2020-10-20', None, "orders", True), + json=self.mock_count_date) + m.get(self.shopify.get_query_url(None, 2, "orders", True), json=self.mock_count_since) self.assertEqual(self.shopify.get_count(None, None, "orders"), 2) - self.assertEqual(self.shopify.get_count("2020-10-20", None, "orders"), 1) + self.assertEqual(self.shopify.get_count('2020-10-20', None, "orders"), 1) self.assertEqual(self.shopify.get_count(None, 2, "orders"), 1) @requests_mock.Mocker() def test_get_orders(self, m): - m.get( - self.shopify.get_query_url(None, None, "orders", False), - json=self.mock_orders_all, - ) - m.get( - self.shopify.get_query_url("2020-10-20", None, "orders", False), - json=self.mock_orders_date, - ) - m.get( - self.shopify.get_query_url(None, 2, "orders", False), - json=self.mock_orders_since, - ) - m.get( - self.shopify.get_query_url(None, None, "orders", False) - + "&financial_status=paid", - json=self.mock_orders_completed, - ) - assert_matching_tables( - self.shopify.get_orders(None, None, False), self.mock_result_all - ) - assert_matching_tables( - self.shopify.get_orders("2020-10-20", None, False), self.mock_result_date - ) - assert_matching_tables( - self.shopify.get_orders(None, 2, False), self.mock_result_since - ) - assert_matching_tables( - self.shopify.get_orders(None, None, True), self.mock_result_completed - ) + m.get(self.shopify.get_query_url(None, None, 'orders', False), json=self.mock_orders_all) + m.get(self.shopify.get_query_url('2020-10-20', None, 'orders', False), + json=self.mock_orders_date) + m.get(self.shopify.get_query_url(None, 2, 'orders', False), json=self.mock_orders_since) + m.get(self.shopify.get_query_url(None, None, 'orders', False) + '&financial_status=paid', + json=self.mock_orders_completed) + 
         assert_matching_tables(self.shopify.get_orders(None, None, False), self.mock_result_all)
+        assert_matching_tables(self.shopify.get_orders('2020-10-20', None, False),
+                               self.mock_result_date)
+        assert_matching_tables(self.shopify.get_orders(None, 2, False), self.mock_result_since)
+        assert_matching_tables(self.shopify.get_orders(None, None, True),
+                               self.mock_result_completed)
 
     @requests_mock.Mocker()
     def test_get_query_url(self, m):
-        self.assertEqual(
-            self.shopify.get_query_url(None, None, "orders", True),
-            f"https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders/"
-            + "count.json?limit=250&status=any",
-        )
-        self.assertEqual(
-            self.shopify.get_query_url("2020-10-20", None, "orders", True),
-            f"https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders/"
-            + "count.json?limit=250&status=any&created_at_min=2020-10-20T00:00:00&"
-            + "created_at_max=2020-10-21T00:00:00",
-        )
-        self.assertEqual(
-            self.shopify.get_query_url(None, 2, "orders", True),
-            f"https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders/"
-            + "count.json?limit=250&status=any&since_id=2",
-        )
-        self.assertEqual(
-            self.shopify.get_query_url(None, None, "orders", False),
-            f"https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders.json?"
-            + "limit=250&status=any",
-        )
+        self.assertEqual(self.shopify.get_query_url(None, None, "orders", True),
+                         f'https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders/'
+                         + 'count.json?limit=250&status=any')
+        self.assertEqual(self.shopify.get_query_url('2020-10-20', None, "orders", True),
+                         f'https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders/'
+                         + 'count.json?limit=250&status=any&created_at_min=2020-10-20T00:00:00&'
+                         + 'created_at_max=2020-10-21T00:00:00')
+        self.assertEqual(self.shopify.get_query_url(None, 2, "orders", True),
+                         f'https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders/'
+                         + 'count.json?limit=250&status=any&since_id=2')
+        self.assertEqual(self.shopify.get_query_url(None, None, "orders", False),
+                         f'https://{SUBDOMAIN}.myshopify.com/admin/api/{API_VERSION}/orders.json?'
+                         + 'limit=250&status=any')
 
     @requests_mock.Mocker()
     def test_graphql(self, m):
         m.post(
-            "https://{0}.myshopify.com/admin/api/{1}/graphql.json".format(
-                SUBDOMAIN, API_VERSION
-            ),
-            json=self.mock_graphql,
+            'https://{0}.myshopify.com/admin/api/{1}/graphql.json'.format(SUBDOMAIN, API_VERSION),
+            json=self.mock_graphql
         )
-        self.assertEqual(
-            self.shopify.graphql(
-                """
+        self.assertEqual(self.shopify.graphql("""
             {{
                 orders(query: "financial_status:=paid", first: 100) {{
                     edges {{
@@ -162,7 +121,4 @@ def test_graphql(self, m):
                 }}
             }}
         }}
-            """
-            ),
-            self.mock_graphql["data"],
-        )
+        """), self.mock_graphql['data'])
diff --git a/test/test_sisense/test_data.py b/test/test_sisense/test_data.py
index 60873d2a70..72bf7d4c8b 100644
--- a/test/test_sisense/test_data.py
+++ b/test/test_sisense/test_data.py
@@ -1,12 +1,13 @@
-ENV_PARAMETERS = {"SISENSE_SITE_NAME": "my_site_name", "SISENSE_API_KEY": "my_api_key"}
-
-TEST_PUBLISH_SHARED_DASHBOARD = {
-    "url": "https://www.periscopedata.com/api/embedded_dashboard?data=%7B%22dashboard%22%3A7863%2C%22embed%22%3A%22v2%22%2C%22filters%22%3A%5B%7B%22name%22%3A%22Filter1%22%2C%22value%22%3A%22value1%22%7D%2C%7B%22name%22%3A%22Filter2%22%2C%22value%22%3A%221234%22%7D%5D%7D&signature=adcb671e8e24572464c31e8f9ffc5f638ab302a0b673f72554d3cff96a692740"  # noqa: E501
+ENV_PARAMETERS = {
+    'SISENSE_SITE_NAME': 'my_site_name',
+    'SISENSE_API_KEY': 'my_api_key'
 }
 
+TEST_PUBLISH_SHARED_DASHBOARD = {'url': 'https://www.periscopedata.com/api/embedded_dashboard?data=%7B%22dashboard%22%3A7863%2C%22embed%22%3A%22v2%22%2C%22filters%22%3A%5B%7B%22name%22%3A%22Filter1%22%2C%22value%22%3A%22value1%22%7D%2C%7B%22name%22%3A%22Filter2%22%2C%22value%22%3A%221234%22%7D%5D%7D&signature=adcb671e8e24572464c31e8f9ffc5f638ab302a0b673f72554d3cff96a692740'}  # noqa
+
 TEST_LIST_SHARED_DASHBOARDS = [
-    "https://app.periscopedata.com/shared/abc",
-    "https://app.periscopedata.com/shared/def",
+    'https://app.periscopedata.com/shared/abc',
+    'https://app.periscopedata.com/shared/def'
 ]
 
-TEST_DELETE_SHARED_DASHBOARD = {"success": True}
+TEST_DELETE_SHARED_DASHBOARD = {'success': True}
diff --git a/test/test_sisense/test_sisense.py b/test/test_sisense/test_sisense.py
index 0148b375ba..b6725ebc87 100644
--- a/test/test_sisense/test_sisense.py
+++ b/test/test_sisense/test_sisense.py
@@ -2,58 +2,36 @@
 import unittest
 from unittest import mock
 import requests_mock
-from parsons import Sisense
+from parsons.sisense.sisense import Sisense
 
-from test.test_sisense.test_data import (
-    ENV_PARAMETERS,
-    TEST_PUBLISH_SHARED_DASHBOARD,
-    TEST_LIST_SHARED_DASHBOARDS,
-    TEST_DELETE_SHARED_DASHBOARD,
-)
+from test.test_sisense.test_data import ENV_PARAMETERS, \
+    TEST_PUBLISH_SHARED_DASHBOARD, TEST_LIST_SHARED_DASHBOARDS, TEST_DELETE_SHARED_DASHBOARD
 
 
 class TestSisense(unittest.TestCase):
+
     def setUp(self):
-        self.sisense = Sisense(site_name="my_site_name", api_key="my_api_key")
+        self.sisense = Sisense(site_name='my_site_name', api_key='my_api_key')
 
     @mock.patch.dict(os.environ, ENV_PARAMETERS)
     def test_init(self):
         sisense = Sisense()
-        self.assertEqual(sisense.site_name, "my_site_name")
-        self.assertEqual(sisense.api_key, "my_api_key")
-        self.assertEqual(sisense.api.uri, "https://app.periscopedata.com/api/v1/")
-        self.assertEqual(
-            sisense.api.headers["HTTP-X-PARTNER-AUTH"], "my_site_name:my_api_key"
-        )
+        self.assertEqual(sisense.site_name, 'my_site_name')
+        self.assertEqual(sisense.api_key, 'my_api_key')
+        self.assertEqual(sisense.api.uri, 'https://app.periscopedata.com/api/v1/')
+        self.assertEqual(sisense.api.headers['HTTP-X-PARTNER-AUTH'], 'my_site_name:my_api_key')
 
     @requests_mock.Mocker()
     def test_publish_shared_dashboard(self, m):
-        m.post(
-            f"{self.sisense.uri}shared_dashboard/create",
-            json=TEST_PUBLISH_SHARED_DASHBOARD,
-        )
-        self.assertEqual(
-            self.sisense.publish_shared_dashboard(dashboard_id="1234"),
-            TEST_PUBLISH_SHARED_DASHBOARD,
-        )  # noqa
+        m.post(f'{self.sisense.uri}shared_dashboard/create', json=TEST_PUBLISH_SHARED_DASHBOARD)
+        self.assertEqual(self.sisense.publish_shared_dashboard(dashboard_id='1234'), TEST_PUBLISH_SHARED_DASHBOARD)  # noqa
 
     @requests_mock.Mocker()
     def test_list_shared_dashboards(self, m):
-        m.post(
-            f"{self.sisense.uri}shared_dashboard/list", json=TEST_LIST_SHARED_DASHBOARDS
-        )
-        self.assertEqual(
-            self.sisense.list_shared_dashboards(dashboard_id="1234"),
-            TEST_LIST_SHARED_DASHBOARDS,
-        )  # noqa
+        m.post(f'{self.sisense.uri}shared_dashboard/list', json=TEST_LIST_SHARED_DASHBOARDS)
+        self.assertEqual(self.sisense.list_shared_dashboards(dashboard_id='1234'), TEST_LIST_SHARED_DASHBOARDS)  # noqa
 
     @requests_mock.Mocker()
     def test_delete_shared_dashboard(self, m):
-        m.post(
-            f"{self.sisense.uri}shared_dashboard/delete",
-            json=TEST_DELETE_SHARED_DASHBOARD,
-        )
-        self.assertEqual(
-            self.sisense.delete_shared_dashboard(token="abc"),
-            TEST_DELETE_SHARED_DASHBOARD,
-        )  # noqa
+        m.post(f'{self.sisense.uri}shared_dashboard/delete', json=TEST_DELETE_SHARED_DASHBOARD)
+        self.assertEqual(self.sisense.delete_shared_dashboard(token='abc'), TEST_DELETE_SHARED_DASHBOARD)  # noqa
diff --git a/test/test_slack/test_slack.py b/test/test_slack/test_slack.py
index e72669c2d8..23f85a260a 100644
--- a/test/test_slack/test_slack.py
+++ b/test/test_slack/test_slack.py
@@ -1,4 +1,5 @@
-from parsons import Table, Slack
+from parsons.etl import Table
+from parsons.notifications.slack import Slack
 
 from slackclient.exceptions import SlackClientError
 
@@ -13,8 +14,9 @@
 
 
 class TestSlack(unittest.TestCase):
+
     def setUp(self):
-        os.environ["SLACK_API_TOKEN"] = "SOME_API_TOKEN"
+        os.environ["SLACK_API_TOKEN"] = 'SOME_API_TOKEN'
 
         self.slack = Slack()
 
@@ -30,7 +32,7 @@ def test_slack_init(self):
 
         self.assertRaises(KeyError, Slack)
 
-        os.environ["SLACK_API_TOKEN"] = "SOME_API_TOKEN"
+        os.environ["SLACK_API_TOKEN"] = 'SOME_API_TOKEN'
         self.assertIn("SLACK_API_TOKEN", os.environ)
 
     @requests_mock.Mocker()
@@ -58,57 +60,23 @@ def test_channels_all_fields(self, m):
         m.post("https://slack.com/api/conversations.list", json=slack_resp)
 
         fields_req = [
-            "id",
-            "name",
-            "is_channel",
-            "created",
-            "creator",
-            "is_archived",
-            "is_general",
-            "name_normalized",
-            "is_shared",
-            "is_org_shared",
-            "is_member",
-            "is_private",
-            "is_mpim",
-            "members",
-            "topic_value",
-            "topic_creator",
-            "topic_last_set",
-            "purpose_value",
-            "purpose_creator",
-            "purpose_last_set",
-            "previous_names",
-            "num_members",
-        ]
+            "id", "name", "is_channel", "created", "creator",
+            "is_archived", "is_general", "name_normalized", "is_shared",
+            "is_org_shared", "is_member", "is_private", "is_mpim", "members",
+            "topic_value", "topic_creator", "topic_last_set", "purpose_value",
+            "purpose_creator", "purpose_last_set", "previous_names",
+            "num_members"]
 
         tbl = self.slack.channels(fields=fields_req)
 
         self.assertIsInstance(tbl, Table)
 
         expected_columns = [
-            "id",
-            "name",
-            "is_channel",
-            "created",
-            "creator",
-            "is_archived",
-            "is_general",
-            "name_normalized",
-            "is_shared",
-            "is_org_shared",
-            "is_member",
-            "is_private",
-            "is_mpim",
-            "members",
-            "topic_value",
-            "topic_creator",
-            "topic_last_set",
-            "purpose_value",
-            "purpose_creator",
-            "purpose_last_set",
-            "previous_names",
-            "num_members",
-        ]
+            "id", "name", "is_channel", "created", "creator",
+            "is_archived", "is_general", "name_normalized", "is_shared",
+            "is_org_shared", "is_member", "is_private", "is_mpim", "members",
+            "topic_value", "topic_creator", "topic_last_set", "purpose_value",
+            "purpose_creator", "purpose_last_set", "previous_names",
+            "num_members"]
 
         self.assertListEqual(sorted(tbl.columns), sorted(expected_columns))
 
         self.assertEqual(tbl.num_rows, 2)
 
@@ -125,13 +93,8 @@ def test_users(self, m):
 
         self.assertIsInstance(tbl, Table)
 
-        expected_columns = [
-            "id",
-            "name",
-            "deleted",
-            "profile_email",
-            "profile_real_name_normalized",
-        ]
+        expected_columns = ["id", "name", "deleted", "profile_email",
+                            "profile_real_name_normalized"]
 
         self.assertListEqual(tbl.columns, expected_columns)
 
         self.assertEqual(tbl.num_rows, 2)
 
@@ -144,93 +107,37 @@ def test_users_all_fields(self, m):
         m.post("https://slack.com/api/users.list", json=slack_resp)
 
         fields_req = [
-            "id",
-            "team_id",
-            "name",
-            "deleted",
-            "color",
-            "real_name",
-            "tz",
-            "tz_label",
-            "tz_offset",
-            "is_admin",
-            "is_owner",
-            "is_primary_owner",
-            "is_restricted",
-            "is_ultra_restricted",
-            "is_bot",
-            "updated",
-            "is_app_user",
-            "has_2fa",
-            "profile_avatar_hash",
-            "profile_display_name",
-            "profile_display_name_normalized",
-            "profile_email",
-            "profile_first_name",
-            "profile_image_1024",
-            "profile_image_192",
-            "profile_image_24",
-            "profile_image_32",
-            "profile_image_48",
-            "profile_image_512",
-            "profile_image_72",
-            "profile_image_original",
-            "profile_last_name",
-            "profile_phone",
-            "profile_real_name",
-            "profile_real_name_normalized",
-            "profile_skype",
-            "profile_status_emoji",
-            "profile_status_text",
-            "profile_team",
-            "profile_title",
-        ]
+            "id", "team_id", "name", "deleted", "color", "real_name", "tz",
+            "tz_label", "tz_offset", "is_admin", "is_owner",
+            "is_primary_owner", "is_restricted", "is_ultra_restricted",
+            "is_bot", "updated", "is_app_user", "has_2fa",
+            "profile_avatar_hash", "profile_display_name",
+            "profile_display_name_normalized", "profile_email",
+            "profile_first_name", "profile_image_1024", "profile_image_192",
+            "profile_image_24", "profile_image_32", "profile_image_48",
+            "profile_image_512", "profile_image_72", "profile_image_original",
+            "profile_last_name", "profile_phone", "profile_real_name",
+            "profile_real_name_normalized", "profile_skype",
+            "profile_status_emoji", "profile_status_text", "profile_team",
+            "profile_title"]
 
         tbl = self.slack.users(fields=fields_req)
 
         self.assertIsInstance(tbl, Table)
 
         expected_columns = [
-            "id",
-            "team_id",
-            "name",
-            "deleted",
-            "color",
-            "real_name",
-            "tz",
-            "tz_label",
-            "tz_offset",
-            "is_admin",
-            "is_owner",
-            "is_primary_owner",
-            "is_restricted",
-            "is_ultra_restricted",
-            "is_bot",
-            "updated",
-            "is_app_user",
-            "has_2fa",
-            "profile_avatar_hash",
-            "profile_display_name",
-            "profile_display_name_normalized",
-            "profile_email",
-            "profile_first_name",
-            "profile_image_1024",
-            "profile_image_192",
-            "profile_image_24",
-            "profile_image_32",
-            "profile_image_48",
-            "profile_image_512",
-            "profile_image_72",
-            "profile_image_original",
-            "profile_last_name",
-            "profile_phone",
-            "profile_real_name",
-            "profile_real_name_normalized",
-            "profile_skype",
-            "profile_status_emoji",
-            "profile_status_text",
-            "profile_team",
-            "profile_title",
-        ]
+            "id", "team_id", "name", "deleted", "color", "real_name", "tz",
+            "tz_label", "tz_offset", "is_admin", "is_owner",
+            "is_primary_owner", "is_restricted", "is_ultra_restricted",
+            "is_bot", "updated", "is_app_user", "has_2fa",
+            "profile_avatar_hash", "profile_display_name",
+            "profile_display_name_normalized", "profile_email",
+            "profile_first_name", "profile_image_1024", "profile_image_192",
+            "profile_image_24", "profile_image_32", "profile_image_48",
+            "profile_image_512", "profile_image_72", "profile_image_original",
+            "profile_last_name", "profile_phone", "profile_real_name",
+            "profile_real_name_normalized", "profile_skype",
+            "profile_status_emoji", "profile_status_text", "profile_team",
+            "profile_title"]
 
         self.assertListEqual(sorted(tbl.columns), sorted(expected_columns))
 
         self.assertEqual(tbl.num_rows, 2)
 
@@ -242,32 +149,29 @@ def test_message_channel(self, m):
 
         m.post("https://slack.com/api/chat.postMessage", json=slack_resp)
 
-        dct = self.slack.message_channel("C1H9RESGL", "Here's a message for you")
+        dct = self.slack.message_channel(
+            "C1H9RESGL", "Here's a message for you")
 
         self.assertIsInstance(dct, dict)
 
         self.assertListEqual(sorted(dct), sorted(slack_resp))
 
         m.post(
             "https://slack.com/api/chat.postMessage",
-            json={"ok": False, "error": "invalid_auth"},
-        )
+            json={"ok": False, "error": "invalid_auth"})
 
         self.assertRaises(
             SlackClientError,
             self.slack.message_channel,
-            "FakeChannel",
-            "Here's a message for you",
-        )
+            "FakeChannel", "Here's a message for you")
 
     @requests_mock.Mocker(case_sensitive=True)
     def test_message(self, m):
         webhook = "https://hooks.slack.com/services/T1234/B1234/D12322"
         m.post(webhook, json={"ok": True})
         Slack.message("#foobar", "this is a message", webhook)
-        self.assertEqual(
-            m._adapter.last_request.json(),
-            {"text": "this is a message", "channel": "#foobar"},
-        )
+        self.assertEqual(m._adapter.last_request.json(),
+                         {"text": "this is a message",
+                          "channel": "#foobar"})
         self.assertEqual(m._adapter.last_request.path, "/services/T1234/B1234/D12322")
 
     @requests_mock.Mocker()
@@ -279,16 +183,17 @@ def test_file_upload(self, m):
 
         m.post("https://slack.com/api/files.upload", json=slack_resp)
 
-        dct = self.slack.upload_file(["D0L4B9P0Q"], file_path)
+        dct = self.slack.upload_file(
+            ["D0L4B9P0Q"], file_path)
 
         self.assertIsInstance(dct, dict)
 
         self.assertListEqual(sorted(dct), sorted(slack_resp))
 
         m.post(
             "https://slack.com/api/files.upload",
-            json={"ok": False, "error": "invalid_auth"},
-        )
+            json={"ok": False, "error": "invalid_auth"})
 
         self.assertRaises(
-            SlackClientError, self.slack.upload_file, ["D0L4B9P0Q"], file_path
-        )
+            SlackClientError,
+            self.slack.upload_file,
+            ["D0L4B9P0Q"], file_path)
diff --git a/test/test_smtp.py b/test/test_smtp.py
index 481af7c413..05624d7d23 100644
--- a/test/test_smtp.py
+++ b/test/test_smtp.py
@@ -1,4 +1,4 @@
-from parsons import SMTP
+from parsons.notifications.smtp import SMTP
 import base64
 import io
 import re
@@ -6,12 +6,13 @@
 
 
 class FakeConnection(object):
+
     def __init__(self, result_obj):
         self.result_obj = result_obj
 
     def sendmail(self, sender, to, message_body):
         self.result_obj.result = (sender, to, message_body)
-        if "willfail@example.com" in to:
+        if 'willfail@example.com' in to:
            return {"willfail@example.com": (550, "User unknown")}
 
     def quit(self):
@@ -19,125 +20,85 @@
 
 
 class TestSMTP(unittest.TestCase):
+
     def setUp(self):
-        self.smtp = SMTP("fake.example.com", username="fake", password="fake")
+        self.smtp = SMTP('fake.example.com', username='fake', password='fake')
         self.smtp.conn = FakeConnection(self)
         self.result = None
         self.quit_ran = False
 
     def test_send_message_simple(self):
-        self.smtp.send_email(
"foo@example.com", "recipient1@example.com", "Simple subject", "Fake body" - ) - self.assertEqual(self.result[0], "foo@example.com") - self.assertEqual(self.result[1], ["recipient1@example.com"]) + self.smtp.send_email('foo@example.com', 'recipient1@example.com', + 'Simple subject', 'Fake body') + self.assertEqual(self.result[0], 'foo@example.com') + self.assertEqual(self.result[1], ['recipient1@example.com']) self.assertTrue( self.result[2].endswith( - "\nto: recipient1@example.com\nfrom: foo@example.com" - "\nsubject: Simple subject\n\nFake body" - ) - ) + '\nto: recipient1@example.com\nfrom: foo@example.com' + '\nsubject: Simple subject\n\nFake body' + )) self.assertTrue(self.quit_ran) def test_send_message_html(self): - self.smtp.send_email( - "foohtml@example.com", - "recipienthtml@example.com", - "Simple subject", - "Fake body", - "

Really Fake html

", - ) - self.assertEqual(self.result[0], "foohtml@example.com") - self.assertEqual(self.result[1], ["recipienthtml@example.com"]) - self.assertRegex(self.result[2], r"

Really Fake html

\n--=======") - self.assertRegex(self.result[2], r"\nFake body\n--======") - self.assertRegex(self.result[2], r"ubject: Simple subject\n") + self.smtp.send_email('foohtml@example.com', 'recipienthtml@example.com', + 'Simple subject', 'Fake body', '

Really Fake html

') + self.assertEqual(self.result[0], 'foohtml@example.com') + self.assertEqual(self.result[1], ['recipienthtml@example.com']) + self.assertRegex(self.result[2], r'

Really Fake html

\n--=======') + self.assertRegex(self.result[2], r'\nFake body\n--======') + self.assertRegex(self.result[2], r'ubject: Simple subject\n') self.assertTrue(self.quit_ran) def test_send_message_manualclose(self): - smtp = SMTP( - "fake.example.com", username="fake", password="fake", close_manually=True - ) + smtp = SMTP('fake.example.com', username='fake', password='fake', + close_manually=True) smtp.conn = FakeConnection(self) - smtp.send_email( - "foo@example.com", "recipient1@example.com", "Simple subject", "Fake body" - ) + smtp.send_email('foo@example.com', 'recipient1@example.com', 'Simple subject', 'Fake body') self.assertFalse(self.quit_ran) def test_send_message_files(self): - named_file_content = "x,y,z\n1,2,3\r\n3,4,5\r\n" - unnamed_file_content = "foo,bar\n1,2\r\n3,4\r\n" + named_file_content = 'x,y,z\n1,2,3\r\n3,4,5\r\n' + unnamed_file_content = 'foo,bar\n1,2\r\n3,4\r\n' bytes_file_content = bytes( - [ - 71, - 73, - 70, - 56, - 57, - 97, - 1, - 0, - 1, - 0, - 0, - 255, - 0, - 44, - 0, - 0, - 0, - 0, - 1, - 0, - 1, - 0, - 0, - 2, - 0, - 59, - ] - ) + [71, 73, 70, 56, 57, 97, 1, 0, 1, 0, 0, 255, + 0, 44, 0, 0, 0, 0, 1, 0, 1, 0, 0, 2, 0, 59]) named_file = io.StringIO(named_file_content) - named_file.name = "xyz.csv" + named_file.name = 'xyz.csv' bytes_file = io.BytesIO(bytes_file_content) - bytes_file.name = "xyz.gif" + bytes_file.name = 'xyz.gif' self.smtp.send_email( - "foofiles@example.com", - "recipientfiles@example.com", - "Simple subject", - "Fake body", - files=[io.StringIO(unnamed_file_content), named_file, bytes_file], + 'foofiles@example.com', 'recipientfiles@example.com', 'Simple subject', 'Fake body', + files=[io.StringIO(unnamed_file_content), named_file, bytes_file] ) - self.assertEqual(self.result[0], "foofiles@example.com") - self.assertEqual(self.result[1], ["recipientfiles@example.com"]) - self.assertRegex(self.result[2], r"\nFake body\n--======") + self.assertEqual(self.result[0], 'foofiles@example.com') + self.assertEqual(self.result[1], ['recipientfiles@example.com']) + self.assertRegex(self.result[2], r'\nFake body\n--======') found = re.findall(r'filename="file"\n\n([\w=/]+)\n\n--===', self.result[2]) - self.assertEqual(base64.b64decode(found[0]).decode(), unnamed_file_content) + self.assertEqual(base64.b64decode(found[0]).decode(), + unnamed_file_content) found_named = re.findall( r'Content-Type: text/csv; charset="utf-8"\nMIME-Version: 1.0' - r"\nContent-Transfer-Encoding: base64\nContent-Disposition: " + r'\nContent-Transfer-Encoding: base64\nContent-Disposition: ' r'attachment; filename="xyz.csv"\n\n([\w=/]+)\n\n--======', - self.result[2], - ) - self.assertEqual(base64.b64decode(found_named[0]).decode(), named_file_content) + self.result[2]) + self.assertEqual(base64.b64decode(found_named[0]).decode(), + named_file_content) found_gif = re.findall( - r"Content-Type: image/gif\nMIME-Version: 1.0" - r"\nContent-Transfer-Encoding: base64\nContent-ID: " + r'Content-Type: image/gif\nMIME-Version: 1.0' + r'\nContent-Transfer-Encoding: base64\nContent-ID: ' r'\nContent-Disposition: attachment; filename="xyz.gif"\n\n([\w=/]+)\n\n--==', - self.result[2], - ) - self.assertEqual(base64.b64decode(found_gif[0]), bytes_file_content) + self.result[2]) + self.assertEqual(base64.b64decode(found_gif[0]), + bytes_file_content) self.assertTrue(self.quit_ran) def test_send_message_partial_fail(self): simple_msg = self.smtp._create_message_simple( - "foo@example.com", - "recipient1@example.com, willfail@example.com", - "Simple subject", - "Fake body", - ) + 'foo@example.com', 
+            'recipient1@example.com, willfail@example.com',
+            'Simple subject', 'Fake body')
         send_result = self.smtp._send_message(simple_msg)
-        self.assertEqual(send_result, {"willfail@example.com": (550, "User unknown")})
+        self.assertEqual(send_result, {"willfail@example.com": (550, "User unknown")})
diff --git a/test/test_targetsmart/test_targetsmart_api.py b/test/test_targetsmart/test_targetsmart_api.py
index e8f01975a8..34a94a12b4 100644
--- a/test/test_targetsmart/test_targetsmart_api.py
+++ b/test/test_targetsmart/test_targetsmart_api.py
@@ -1,46 +1,39 @@
 import unittest
 import requests_mock
-from parsons import TargetSmartAPI, Table
+from parsons.targetsmart.targetsmart_api import TargetSmartAPI
+from parsons.etl.table import Table
 from test.utils import validate_list
 from test.responses.ts_responses import (
-    address_response,
-    district_point,
-    district_expected,
-    district_zip,
-    zip_expected,
-    phone_response,
-    phone_expected,
-    radius_response,
-)
-
-output_list = [
-    {
-        "vb.tsmart_zip": "60625",
-        "vb.vf_g2014": "Y",
-        "vb.vf_g2016": "Y",
-        "vb.tsmart_middle_name": "H",
-        "ts.tsmart_midterm_general_turnout_score": "85.5",
-        "vb.tsmart_name_suffix": "",
-        "vb.voterbase_gender": "Male",
-        "vb.tsmart_city": "CHICAGO",
-        "vb.tsmart_full_address": "908 N MAIN AVE APT 2",
-        "vb.voterbase_phone": "5125705356",
-        "vb.tsmart_partisan_score": "99.6",
-        "vb.tsmart_last_name": "BLANKS",
-        "vb.voterbase_id": "IL-12568670",
-        "vb.tsmart_first_name": "BILLY",
-        "vb.voterid": "Q8W8R82Z",
-        "vb.voterbase_age": "37",
-        "vb.tsmart_state": "IL",
-        "vb.voterbase_registration_status": "Registered",
-    }
-]
+    address_response, district_point, district_expected, district_zip, zip_expected,
+    phone_response, phone_expected, radius_response)
+
+output_list = [{
+    'vb.tsmart_zip': '60625',
+    'vb.vf_g2014': 'Y',
+    'vb.vf_g2016': 'Y',
+    'vb.tsmart_middle_name': 'H',
+    'ts.tsmart_midterm_general_turnout_score': '85.5',
+    'vb.tsmart_name_suffix': '',
+    'vb.voterbase_gender': 'Male',
+    'vb.tsmart_city': 'CHICAGO',
+    'vb.tsmart_full_address': '908 N MAIN AVE APT 2',
+    'vb.voterbase_phone': '5125705356',
+    'vb.tsmart_partisan_score': '99.6',
+    'vb.tsmart_last_name': 'BLANKS',
+    'vb.voterbase_id': 'IL-12568670',
+    'vb.tsmart_first_name': 'BILLY',
+    'vb.voterid': 'Q8W8R82Z',
+    'vb.voterbase_age': '37',
+    'vb.tsmart_state': 'IL',
+    'vb.voterbase_registration_status': 'Registered'
+}]
 
 
 class TestTargetSmartAPI(unittest.TestCase):
+
     def setUp(self):
-        self.ts = TargetSmartAPI(api_key="FAKEKEY")
+        self.ts = TargetSmartAPI(api_key='FAKEKEY')
 
     def tearDown(self):
 
@@ -49,172 +42,112 @@ def tearDown(self):
 
     @requests_mock.Mocker()
     def test_data_enhance(self, m):
-        json = {
-            "input": {"search_id": "IL-12568670", "search_id_type": "voterbase"},
-            "error": None,
-            "output": output_list,
-            "output_size": 1,
-            "match_found": True,
-            "gateway_id": "b8c86f27-fb32-11e8-9cc1-45bc340a4d22",
-            "function_id": "b8c98093-fb32-11e8-8b25-e99c70f6fe74",
-        }
-
-        expected = [
-            "vb.tsmart_zip",
-            "vb.vf_g2014",
-            "vb.vf_g2016",
-            "vb.tsmart_middle_name",
-            "ts.tsmart_midterm_general_turnout_score",
-            "vb.tsmart_name_suffix",
-            "vb.voterbase_gender",
-            "vb.tsmart_city",
-            "vb.tsmart_full_address",
-            "vb.voterbase_phone",
-            "vb.tsmart_partisan_score",
-            "vb.tsmart_last_name",
-            "vb.voterbase_id",
-            "vb.tsmart_first_name",
-            "vb.voterid",
-            "vb.voterbase_age",
-            "vb.tsmart_state",
-            "vb.voterbase_registration_status",
-        ]
-
-        m.get(self.ts.connection.uri + "person/data-enhance", json=json)
+        json = {'input':
+                {'search_id': 'IL-12568670',
+                 'search_id_type': 'voterbase'},
+                'error': None,
+                'output': output_list,
+                'output_size': 1,
+                'match_found': True,
+                'gateway_id': 'b8c86f27-fb32-11e8-9cc1-45bc340a4d22',
+                'function_id': 'b8c98093-fb32-11e8-8b25-e99c70f6fe74'}
+
+        expected = ['vb.tsmart_zip', 'vb.vf_g2014', 'vb.vf_g2016', 'vb.tsmart_middle_name',
+                    'ts.tsmart_midterm_general_turnout_score', 'vb.tsmart_name_suffix',
+                    'vb.voterbase_gender', 'vb.tsmart_city', 'vb.tsmart_full_address',
+                    'vb.voterbase_phone', 'vb.tsmart_partisan_score', 'vb.tsmart_last_name',
+                    'vb.voterbase_id', 'vb.tsmart_first_name', 'vb.voterid', 'vb.voterbase_age',
+                    'vb.tsmart_state', 'vb.voterbase_registration_status']
+
+        m.get(self.ts.connection.uri + 'person/data-enhance', json=json)
 
         # Assert response is expected structure
-        self.assertTrue(validate_list(expected, self.ts.data_enhance("IL-12568678")))
+        self.assertTrue(validate_list(expected, self.ts.data_enhance('IL-12568678')))
 
         # Assert exception on missing state
         with self.assertRaises(Exception):
-            self.ts.data_enhance("vb0001", search_id_type="votebuilder")
+            self.ts.data_enhance('vb0001', search_id_type='votebuilder')
 
         # Assert exception on missing state
         with self.assertRaises(Exception):
-            self.ts.data_enhance("vb0001", search_id_type="smartvan")
+            self.ts.data_enhance('vb0001', search_id_type='smartvan')
 
         # Assert exception on missing state
         with self.assertRaises(Exception):
-            self.ts.data_enhance("vb0001", search_id_type="voter")
+            self.ts.data_enhance('vb0001', search_id_type='voter')
 
         # Assert works with state provided
-        for i in ["votebuilder", "voter", "smartvan"]:
-            self.assertTrue(
-                validate_list(
-                    expected,
-                    self.ts.data_enhance("IL-12568678", search_id_type=i, state="IL"),
-                )
-            )
+        for i in ['votebuilder', 'voter', 'smartvan']:
+            self.assertTrue(validate_list(expected, self.ts.data_enhance(
+                'IL-12568678',
+                search_id_type=i,
+                state='IL')))
 
     @requests_mock.Mocker()
     def test_radius_search(self, m):
-        m.get(self.ts.connection.uri + "person/radius-search", json=radius_response)
-
-        expected = [
-            "similarity_score",
-            "distance_km",
-            "distance_meters",
-            "distance_miles",
-            "distance_feet",
-            "proximity_score",
-            "composite_score",
-            "uniqueness_score",
-            "confidence_indicator",
-            "ts.tsmart_midterm_general_turnout_score",
-            "vb.tsmart_city",
-            "vb.tsmart_first_name",
-            "vb.tsmart_full_address",
-            "vb.tsmart_last_name",
-            "vb.tsmart_middle_name",
-            "vb.tsmart_name_suffix",
-            "vb.tsmart_partisan_score",
-            "vb.tsmart_precinct_id",
-            "vb.tsmart_precinct_name",
-            "vb.tsmart_state",
-            "vb.tsmart_zip",
-            "vb.tsmart_zip4",
-            "vb.vf_earliest_registration_date",
-            "vb.vf_g2014",
-            "vb.vf_g2016",
-            "vb.vf_precinct_id",
-            "vb.vf_precinct_name",
-            "vb.vf_reg_cass_address_full",
-            "vb.vf_reg_cass_city",
-            "vb.vf_reg_cass_state",
-            "vb.vf_reg_cass_zip",
-            "vb.vf_reg_cass_zip4",
-            "vb.vf_registration_date",
-            "vb.voterbase_age",
-            "vb.voterbase_gender",
-            "vb.voterbase_id",
-            "vb.voterbase_phone",
-            "vb.voterbase_registration_status",
-            "vb.voterid",
-        ]
+        m.get(self.ts.connection.uri + 'person/radius-search', json=radius_response)
+
+        expected = ['similarity_score', 'distance_km', 'distance_meters', 'distance_miles',
+                    'distance_feet', 'proximity_score', 'composite_score', 'uniqueness_score',
+                    'confidence_indicator', 'ts.tsmart_midterm_general_turnout_score',
+                    'vb.tsmart_city', 'vb.tsmart_first_name', 'vb.tsmart_full_address',
+                    'vb.tsmart_last_name', 'vb.tsmart_middle_name', 'vb.tsmart_name_suffix',
+                    'vb.tsmart_partisan_score', 'vb.tsmart_precinct_id', 'vb.tsmart_precinct_name',
+                    'vb.tsmart_state', 'vb.tsmart_zip', 'vb.tsmart_zip4',
+                    'vb.vf_earliest_registration_date', 'vb.vf_g2014', 'vb.vf_g2016',
+                    'vb.vf_precinct_id', 'vb.vf_precinct_name', 'vb.vf_reg_cass_address_full',
+                    'vb.vf_reg_cass_city', 'vb.vf_reg_cass_state', 'vb.vf_reg_cass_zip',
+                    'vb.vf_reg_cass_zip4', 'vb.vf_registration_date', 'vb.voterbase_age',
+                    'vb.voterbase_gender', 'vb.voterbase_id', 'vb.voterbase_phone',
+                    'vb.voterbase_registration_status', 'vb.voterid']
 
         # Assert response is expected structure
         def rad_search():
-            return self.ts.radius_search(
-                "BILLY",
-                "Burchard",
-                radius_size=100,
-                address="908 N Washtenaw, Chicago, IL",
-            )
+            return self.ts.radius_search('BILLY', 'Burchard', radius_size=100,
+                                         address='908 N Washtenaw, Chicago, IL')
 
         self.assertTrue(validate_list(expected, rad_search()))
 
     def test_district_args(self):
-        self.assertRaises(ValueError, self.ts.district, search_type="address")
-        self.assertRaises(ValueError, self.ts.district, search_type="zip", zip4=9)
-        self.assertRaises(ValueError, self.ts.district, search_type="zip", zip5=0)
-        self.assertRaises(ValueError, self.ts.district, search_type="point")
-        self.assertRaises(ValueError, self.ts.district, search_type="zip")
+        self.assertRaises(ValueError, self.ts.district, search_type='address')
+        self.assertRaises(ValueError, self.ts.district, search_type='zip', zip4=9)
+        self.assertRaises(ValueError, self.ts.district, search_type='zip', zip5=0)
+        self.assertRaises(ValueError, self.ts.district, search_type='point')
+        self.assertRaises(ValueError, self.ts.district, search_type='zip')
 
     @requests_mock.Mocker()
     def test_district_point(self, m):
 
         # Test Points
-        m.get(self.ts.connection.uri + "service/district", json=district_point)
-        self.assertTrue(
-            validate_list(
-                district_expected,
-                self.ts.district(
-                    search_type="point", latitude="41.898369", longitude="-87.694382"
-                ),
-            )
-        )
+        m.get(self.ts.connection.uri + 'service/district', json=district_point)
+        self.assertTrue(validate_list(district_expected,
+                                      self.ts.district(search_type='point',
+                                                       latitude='41.898369',
+                                                       longitude='-87.694382')))
 
     @requests_mock.Mocker()
     def test_district_zip(self, m):
 
         # Test Zips
-        m.get(self.ts.connection.uri + "service/district", json=district_zip)
-        self.assertTrue(
-            validate_list(
-                zip_expected,
-                self.ts.district(search_type="zip", zip5="60622", zip4="7194"),
-            )
-        )
+        m.get(self.ts.connection.uri + 'service/district', json=district_zip)
+        self.assertTrue(validate_list(zip_expected,
+                                      self.ts.district(search_type='zip',
+                                                       zip5='60622',
+                                                       zip4='7194')))
 
     @requests_mock.Mocker()
     def test_district_address(self, m):
 
         # Test Address
-        m.get(self.ts.connection.uri + "service/district", json=address_response)
-        self.assertTrue(
-            validate_list(
-                district_expected,
-                self.ts.district(
-                    search_type="address", address="908 N Main St, Chicago, IL 60611"
-                ),
-            )
-        )
+        m.get(self.ts.connection.uri + 'service/district', json=address_response)
+        self.assertTrue(validate_list(district_expected,
+                                      self.ts.district(search_type='address',
+                                                       address='908 N Main St, Chicago, IL 60611')))
 
     @requests_mock.Mocker()
     def test_phone(self, m):
 
         # Test phone
-        m.get(self.ts.connection.uri + "person/phone-search", json=phone_response)
-        self.assertTrue(
-            validate_list(phone_expected, self.ts.phone(Table([{"phone": 4435705355}])))
-        )
+        m.get(self.ts.connection.uri + 'person/phone-search', json=phone_response)
+        self.assertTrue(validate_list(phone_expected,
+                                      self.ts.phone(Table([{'phone': 4435705355}]))))
diff --git a/test/test_targetsmart/test_targetsmart_automation.py b/test/test_targetsmart/test_targetsmart_automation.py
index 124ac06c6d..0b57ae4d1f 100644
--- a/test/test_targetsmart/test_targetsmart_automation.py
+++ b/test/test_targetsmart/test_targetsmart_automation.py
@@ -1,21 +1,21 @@
-from parsons import TargetSmartAutomation, SFTP
+from parsons.targetsmart.targetsmart_automation import TargetSmartAutomation
+from parsons.sftp import SFTP
 import unittest
 from test.utils import mark_live_test
 import os
 
 
 class TestTargetSmartAutomation(unittest.TestCase):
+
     def setUp(self):
 
         self.ts = TargetSmartAutomation()
-        self.job_name = "a-test-job"
-        self.sftp = SFTP(
-            self.ts.sftp_host,
-            os.environ["TS_SFTP_USERNAME"],
-            os.environ["TS_SFTP_PASSWORD"],
-            self.ts.sftp_port,
-        )
-        self.test_xml = "test/test_targetsmart/job_config.xml"
+        self.job_name = 'a-test-job'
+        self.sftp = SFTP(self.ts.sftp_host,
+                         os.environ['TS_SFTP_USERNAME'],
+                         os.environ['TS_SFTP_PASSWORD'],
+                         self.ts.sftp_port)
+        self.test_xml = 'test/test_targetsmart/job_config.xml'
 
     def tearDown(self):
 
@@ -26,12 +26,12 @@ def tearDown(self):
     def test_create_job_xml(self):
 
         # Assert that job xml creates the file correctly
-        job_xml = self.ts.create_job_xml(
-            "job_type", "match_job", ["test@gmail.com", "test2@gmail.com"]
-        )
-        with open(self.test_xml, "r") as xml:
+        job_xml = self.ts.create_job_xml('job_type',
+                                         'match_job',
+                                         ['test@gmail.com', 'test2@gmail.com'])
+        with open(self.test_xml, 'r') as xml:
             test_xml = xml.read()
-        with open(job_xml, "r") as xml:
+        with open(job_xml, 'r') as xml:
             real_xml = xml.read()
 
         self.assertEqual(test_xml, real_xml)
 
@@ -39,41 +39,37 @@ def test_config_status(self):
 
         # Find good configuration
-        self.sftp.put_file(
-            self.test_xml, f"{self.ts.sftp_dir}/{self.job_name}.job.xml.good"
-        )
+        self.sftp.put_file(self.test_xml, f'{self.ts.sftp_dir}/{self.job_name}.job.xml.good')
         self.assertTrue(self.ts.config_status(self.job_name))
         self.ts.remove_files(self.job_name)
 
         # Find bad configuration
-        self.sftp.put_file(
-            self.test_xml, f"{self.ts.sftp_dir}/{self.job_name}.job.xml.bad"
-        )
+        self.sftp.put_file(self.test_xml, f'{self.ts.sftp_dir}/{self.job_name}.job.xml.bad')
         self.assertRaises(ValueError, self.ts.config_status, self.job_name)
 
     @mark_live_test
     def test_match_status(self):
 
         # Find good configuration
-        good_match = "test/test_targetsmart/match_good.xml"
-        self.sftp.put_file(good_match, f"{self.ts.sftp_dir}/{self.job_name}.finish.xml")
+        good_match = 'test/test_targetsmart/match_good.xml'
+        self.sftp.put_file(good_match, f'{self.ts.sftp_dir}/{self.job_name}.finish.xml')
         self.assertTrue(self.ts.match_status(self.job_name))
         self.ts.remove_files(self.job_name)
 
         # Find bad configuration
-        bad_match = "test/test_targetsmart/match_bad.xml"
-        self.sftp.put_file(bad_match, f"{self.ts.sftp_dir}/{self.job_name}.finish.xml")
+        bad_match = 'test/test_targetsmart/match_bad.xml'
+        self.sftp.put_file(bad_match, f'{self.ts.sftp_dir}/{self.job_name}.finish.xml')
         self.assertRaises(ValueError, self.ts.match_status, self.job_name)
 
     @mark_live_test
     def test_remove_files(self):
 
         # Add a file
-        self.sftp.put_file(self.test_xml, f"{self.ts.sftp_dir}/{self.job_name}.txt")
+        self.sftp.put_file(self.test_xml, f'{self.ts.sftp_dir}/{self.job_name}.txt')
 
         # Remove files
         self.ts.remove_files(self.job_name)
 
         # Check that file is not there
-        dir_list = self.sftp.list_directory(f"{self.ts.sftp_dir}/")
-        self.assertNotIn(f"{self.job_name}.txt", dir_list)
+        dir_list = self.sftp.list_directory(f'{self.ts.sftp_dir}/')
+        self.assertNotIn(f'{self.job_name}.txt', dir_list)
diff --git a/test/test_targetsmart/test_targetsmart_smartmatch.py b/test/test_targetsmart/test_targetsmart_smartmatch.py
deleted file mode 100644
index 7a8ef2f3a4..0000000000
--- a/test/test_targetsmart/test_targetsmart_smartmatch.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import csv
-import io
-import gzip
-
-import petl
-import pytest
-from parsons.targetsmart.targetsmart_api import TargetSmartAPI
-
-
-@pytest.fixture
-def intable():
-    return petl.wrap(
-        [
-            [
-                "first_name",
-                "last_name",
-                "address1",
-                "email",
-                "phone",
-                "some_unknown_field",
-            ],
-            ["Bob", "Smith", "123 Main", "bob@example.com", "1231231234", "foo"],
-            ["Alice", "Example", "123 Main", "", "", "bar"],
-            ["Sally", "Example", "123 Main", "", "", ""],
-        ]
-    )
-
-
-@pytest.fixture
-def raw_outtable(intable):
-    return (
-        intable.addrownumbers(field="ts__input_row")
-        .addrownumbers(field="ts__row")
-        .addrownumbers(field="matchback_id")
-        .convert("ts__input_row", str)
-        .convert("ts__row", str)
-        .addfield("tsmart_match_code", "Y")
-        .addfield("vb.voterbase_id", "OH-123")
-    )
-
-
-@pytest.fixture
-def prep_intable(intable):
-    return intable.addrownumbers(field="matchback_id")
-
-
-@pytest.fixture
-def raw_outcsv(raw_outtable):
-    buf = io.StringIO()
-    writer = csv.writer(buf)
-    writer.writerows(list(raw_outtable))
-    return buf.getvalue()
-
-
-@pytest.fixture
-def raw_outgz(raw_outcsv):
-    buf = io.BytesIO()
-    with gzip.GzipFile(fileobj=buf, mode="w") as gz:
-        gz.write(raw_outcsv.encode("utf8"))
-    return buf.getvalue()
-
-
-@pytest.fixture
-def final_outtable(prep_intable, raw_outtable):
-    return petl.leftjoin(prep_intable, raw_outtable, key="matchback_id").cutout(
-        "matchback_id"
-    )
-
-
-@pytest.fixture
-def submit_filename():
-    return "parsons_test.csv"
-
-
-def test_smartmatch(
-    intable,
-    submit_filename,
-    raw_outgz,
-    raw_outcsv,
-    raw_outtable,
-    final_outtable,
-    requests_mock,
-):
-    ts = TargetSmartAPI("mockkey")
-    resp1 = {"url": "https://mock_smartmatch_upload_endpoint", "error": None}
-    poll_resp = {"url": "https://mock_smartmatch_download_endpoint", "error": None}
-    requests_mock.get("https://api.targetsmart.com/service/smartmatch", json=resp1)
-    requests_mock.put(resp1["url"])
-    requests_mock.get(
-        "https://api.targetsmart.com/service/smartmatch/poll", json=poll_resp
-    )
-    requests_mock.get(poll_resp["url"], content=raw_outgz)
-
-    results = ts.smartmatch(intable).to_petl()
-    assert list(final_outtable) == list(results)
diff --git a/test/test_turbovote/test_turbovote.py b/test/test_turbovote/test_turbovote.py
index bba18f4542..5451da3220 100644
--- a/test/test_turbovote/test_turbovote.py
+++ b/test/test_turbovote/test_turbovote.py
@@ -1,74 +1,54 @@
 import unittest
 import os
 import requests_mock
-from parsons import TurboVote
+from parsons.turbovote import TurboVote
 from test.utils import validate_list
 
 _dir = os.path.dirname(__file__)
 
-fake_token = {"id-token": "FAKE-TOKEN"}
+fake_token = {'id-token': 'FAKE-TOKEN'}
 
 
 class TestTurboVote(unittest.TestCase):
+
     def setUp(self):
 
-        self.tv = TurboVote("usr", "pwd", "myorg")
+        self.tv = TurboVote('usr', 'pwd', 'myorg')
 
     def test_init(self):
 
-        self.assertEqual(self.tv.username, "usr")
-        self.assertEqual(self.tv.password, "pwd")
-        self.assertEqual(self.tv.subdomain, "myorg")
+        self.assertEqual(self.tv.username, 'usr')
+        self.assertEqual(self.tv.password, 'pwd')
+        self.assertEqual(self.tv.subdomain, 'myorg')
 
     @requests_mock.Mocker()
     def test_get_token(self, m):
 
         # Assert the token is returned
-        m.post(self.tv.uri + "login", json=fake_token)
-        self.assertEqual(fake_token["id-token"], self.tv._get_token())
+        m.post(self.tv.uri + 'login', json=fake_token)
+        self.assertEqual(fake_token['id-token'], self.tv._get_token())
 
     @requests_mock.Mocker()
     def test_get_users(self, m):
 
-        expected = [
-            "id",
-            "first-name",
-            "middle-name",
-            "last-name",
-            "phone",
-            "email",
-            "registered-address-street",
-            "registered-address-street-2",
-            "registered-address-city",
-            "registered-address-state",
-            "registered-address-zip",
-            "mailing-address-street",
-            "mailing-address-street-2",
-            "mailing-address-city",
-            "mailing-address-state",
-            "mailing-address-zip",
-            "dob",
-            "language-preference",
-            "hostname",
-            "referral-code",
-            "partner-comms-opt-in",
-            "created-at",
-            "updated-at",
-            "voter-registration-status",
-            "voter-registration-source",
-            "voter-registration-method",
-            "voting-method-preference",
-            "email subscribed",
-            "sms subscribed",
-        ]
-
-        with open(f"{_dir}/users.txt", "r") as users_text:
+        expected = ['id', 'first-name', 'middle-name', 'last-name',
+                    'phone', 'email', 'registered-address-street',
+                    'registered-address-street-2', 'registered-address-city',
+                    'registered-address-state', 'registered-address-zip',
+                    'mailing-address-street', 'mailing-address-street-2',
+                    'mailing-address-city', 'mailing-address-state',
+                    'mailing-address-zip', 'dob', 'language-preference',
+                    'hostname', 'referral-code', 'partner-comms-opt-in',
+                    'created-at', 'updated-at', 'voter-registration-status',
+                    'voter-registration-source', 'voter-registration-method',
+                    'voting-method-preference', 'email subscribed',
+                    'sms subscribed']
+
+        with open(f'{_dir}/users.txt', 'r') as users_text:
 
             # Mock endpoints
-            m.post(self.tv.uri + "login", json=fake_token)
-            m.get(
-                self.tv.uri + f"partners/{self.tv.subdomain}.turbovote.org/users",
-                text=users_text.read(),
-            )
+            m.post(self.tv.uri + 'login', json=fake_token)
+            m.get(self.tv.uri + f'partners/{self.tv.subdomain}.turbovote.org/users',
+                  text=users_text.read())
 
             self.assertTrue(validate_list(expected, self.tv.get_users()))
diff --git a/test/test_twilio/test_twilio.py b/test/test_twilio/test_twilio.py
index 748bc00aa6..232b3885e2 100644
--- a/test/test_twilio/test_twilio.py
+++ b/test/test_twilio/test_twilio.py
@@ -1,66 +1,54 @@
 import os
 import unittest
 import unittest.mock as mock
-from parsons import Twilio
+from parsons.twilio.twilio import Twilio
 
 
 class TestTwilio(unittest.TestCase):
+
     def setUp(self):
 
-        os.environ["TWILIO_ACCOUNT_SID"] = "MYFAKESID"
-        os.environ["TWILIO_AUTH_TOKEN"] = "MYFAKEAUTHTOKEN"
+        os.environ['TWILIO_ACCOUNT_SID'] = 'MYFAKESID'
+        os.environ['TWILIO_AUTH_TOKEN'] = 'MYFAKEAUTHTOKEN'
 
         self.twilio = Twilio()
         self.twilio.client = mock.MagicMock()
 
     def test_get_account(self):
 
-        fake_sid = "FAKESID"
+        fake_sid = 'FAKESID'
         self.twilio.get_account(fake_sid)
         assert self.twilio.client.api.accounts.called_with(fake_sid)
 
     def test_get_accounts(self):
 
-        self.twilio.get_accounts(name="MyOrg", status="active")
+        self.twilio.get_accounts(name='MyOrg', status='active')
         assert self.twilio.client.api.accounts.list.called_with(
-            friendly_name="MyOrg", status="active"
-        )
+            friendly_name='MyOrg', status='active')
 
     def test_get_messages(self):
 
-        self.twilio.get_messages(date_sent="2019-10-29")
-        assert self.twilio.client.messages.list.called_with(date_sent="2019-10-29")
+        self.twilio.get_messages(date_sent='2019-10-29')
+        assert self.twilio.client.messages.list.called_with(date_sent='2019-10-29')
 
     def test_get_account_usage(self):
 
         # Make sure that it is calling the correct Twilio methods
-        self.twilio.get_account_usage(time_period="today")
-        assert self.twilio.client.usage.records.today.list.called_with(
-            time_period="today"
-        )
-        self.twilio.get_account_usage(time_period="last_month")
+        self.twilio.get_account_usage(time_period='today')
+        assert self.twilio.client.usage.records.today.list.called_with(time_period='today')
+        self.twilio.get_account_usage(time_period='last_month')
         assert self.twilio.client.usage.records.last_month.list.called_with(
-            time_period="last_month"
-        )
-        self.twilio.get_account_usage(time_period="this_month")
+            time_period='last_month')
+        self.twilio.get_account_usage(time_period='this_month')
         assert self.twilio.client.usage.records.this_month.list.called_with(
-            time_period="this_month"
-        )
-        self.twilio.get_account_usage(time_period="yesterday")
-        assert self.twilio.client.usage.records.today.list.called_with(
-            time_period="yesterday"
-        )
+            time_period='this_month')
+        self.twilio.get_account_usage(time_period='yesterday')
+        assert self.twilio.client.usage.records.today.list.called_with(time_period='yesterday')
 
         # Make sure that it is calling the correct Twilio methods
-        self.twilio.get_account_usage(time_period="daily", start_date="10-19-2019")
-        assert self.twilio.client.usage.records.daily.list.called_with(
-            start_date="10-19-2019"
-        )
-        self.twilio.get_account_usage(time_period="monthly", start_date="10-19-2019")
-        assert self.twilio.client.usage.records.monthly.list.called_with(
-            start_date="10-19-2019"
-        )
-        self.twilio.get_account_usage(time_period="yearly", start_date="10-19-2019")
-        assert self.twilio.client.usage.records.yearly.list.called_with(
-            start_date="10-19-2019"
-        )
+        self.twilio.get_account_usage(time_period='daily', start_date='10-19-2019')
+        assert self.twilio.client.usage.records.daily.list.called_with(start_date='10-19-2019')
+        self.twilio.get_account_usage(time_period='monthly', start_date='10-19-2019')
+        assert self.twilio.client.usage.records.monthly.list.called_with(start_date='10-19-2019')
+        self.twilio.get_account_usage(time_period='yearly', start_date='10-19-2019')
+        assert self.twilio.client.usage.records.yearly.list.called_with(start_date='10-19-2019')
diff --git a/test/test_utilities.py b/test/test_utilities.py
index ec4a4eaf2d..7ea2e48f61 100644
--- a/test/test_utilities.py
+++ b/test/test_utilities.py
@@ -4,7 +4,7 @@
 import shutil
 import datetime
 from unittest import mock
-from parsons import Table
+from parsons.etl.table import Table
 from parsons.utilities.datetime import date_to_timestamp, parse_date
 from parsons.utilities import files
 from parsons.utilities import check_env
@@ -15,23 +15,19 @@
 
 @pytest.mark.parametrize(
     ["date", "exp_ts"],
-    [
-        pytest.param("2018-12-13", 1544659200),
-        pytest.param("2018-12-13T00:00:00-08:00", 1544688000),
-        pytest.param("", None),
-        pytest.param("2018-12-13 PST", None, marks=[xfail_value_error]),
-    ],
-)
+    [pytest.param("2018-12-13", 1544659200),
+     pytest.param("2018-12-13T00:00:00-08:00", 1544688000),
+     pytest.param("", None),
+     pytest.param("2018-12-13 PST", None, marks=[xfail_value_error]),
+     ])
 def test_date_to_timestamp(date, exp_ts):
     assert date_to_timestamp(date) == exp_ts
 
 
 def test_parse_date():
     # Test parsing an ISO8601 string
-    expected = datetime.datetime(
-        year=2020, month=1, day=1, tzinfo=datetime.timezone.utc
-    )
-    parsed = parse_date("2020-01-01T00:00:00.000 UTC")
+    expected = datetime.datetime(year=2020, month=1, day=1, tzinfo=datetime.timezone.utc)
+    parsed = parse_date('2020-01-01T00:00:00.000 UTC')
     assert parsed == expected, parsed
 
     # Test parsing a unix timestamp
@@ -49,17 +45,17 @@
 
 
 def test_create_temp_file_for_path():
-    temp_path = files.create_temp_file_for_path("some/file.gz")
-    assert temp_path[-3:] == ".gz"
+    temp_path = files.create_temp_file_for_path('some/file.gz')
+    assert temp_path[-3:] == '.gz'
 
 
 def test_create_temp_directory():
     temp_directory = files.create_temp_directory()
-    test_file1 = f"{temp_directory}/test.txt"
-    test_file2 = f"{temp_directory}/test2.txt"
-    with open(test_file1, "w") as fh1, open(test_file2, "w") as fh2:
-        fh1.write("TEST")
-        fh2.write("TEST")
+    test_file1 = f'{temp_directory}/test.txt'
+    test_file2 = f'{temp_directory}/test2.txt'
+    with open(test_file1, 'w') as fh1, open(test_file2, 'w') as fh2:
+        fh1.write('TEST')
+        fh2.write('TEST')
 
     assert files.has_data(test_file1)
     assert files.has_data(test_file2)
 
@@ -68,7 +64,7 @@ def test_create_temp_directory():
 
     # Verify the temp file no longer exists
     with pytest.raises(FileNotFoundError):
-        open(test_file1, "r")
+        open(test_file1, 'r')
 
 
 def test_close_temp_file():
@@ -77,60 +73,60 @@ def test_close_temp_file():
 
     # Verify the temp file no longer exists
     with pytest.raises(FileNotFoundError):
-        open(temp, "r")
+        open(temp, 'r')
 
 
 def test_is_gzip_path():
-    assert files.is_gzip_path("some/file.gz")
-    assert not files.is_gzip_path("some/file")
-    assert not files.is_gzip_path("some/file.csv")
+    assert files.is_gzip_path('some/file.gz')
+    assert not files.is_gzip_path('some/file')
+    assert not files.is_gzip_path('some/file.csv')
 
 
 def test_suffix_for_compression_type():
-    assert files.suffix_for_compression_type(None) == ""
-    assert files.suffix_for_compression_type("") == ""
-    assert files.suffix_for_compression_type("gzip") == ".gz"
+    assert files.suffix_for_compression_type(None) == ''
+    assert files.suffix_for_compression_type('') == ''
+    assert files.suffix_for_compression_type('gzip') == '.gz'
 
 
 def test_compression_type_for_path():
-    assert files.compression_type_for_path("some/file") is None
-    assert files.compression_type_for_path("some/file.csv") is None
-    assert files.compression_type_for_path("some/file.csv.gz") == "gzip"
+    assert files.compression_type_for_path('some/file') is None
+    assert files.compression_type_for_path('some/file.csv') is None
+    assert files.compression_type_for_path('some/file.csv.gz') == 'gzip'
 
 
 def test_empty_file():
     # Create fake files.
-    os.mkdir("tmp")
-    with open("tmp/empty.csv", "w+") as _:
+    os.mkdir('tmp')
+    with open('tmp/empty.csv', 'w+') as _:
         pass
-    Table([["1"], ["a"]]).to_csv("tmp/full.csv")
+    Table([['1'], ['a']]).to_csv('tmp/full.csv')
 
-    assert not files.has_data("tmp/empty.csv")
-    assert files.has_data("tmp/full.csv")
+    assert not files.has_data('tmp/empty.csv')
+    assert files.has_data('tmp/full.csv')
 
     # Remove fake files and dir
-    shutil.rmtree("tmp")
+    shutil.rmtree('tmp')
 
 
 def test_json_format():
-    assert json_format.arg_format("my_arg") == "myArg"
+    assert json_format.arg_format('my_arg') == 'myArg'
 
 
 def test_remove_empty_keys():
     # Assert key removed when None
-    test_dict = {"a": None, "b": 2}
-    assert json_format.remove_empty_keys(test_dict) == {"b": 2}
+    test_dict = {'a': None, 'b': 2}
+    assert json_format.remove_empty_keys(test_dict) == {'b': 2}
 
     # Assert key not removed when None
-    test_dict = {"a": 1, "b": 2}
-    assert json_format.remove_empty_keys(test_dict) == {"a": 1, "b": 2}
+    test_dict = {'a': 1, 'b': 2}
+    assert json_format.remove_empty_keys(test_dict) == {'a': 1, 'b': 2}
 
     # Assert that a nested empty string is removed
-    test_dict = {"a": "", "b": 2}
-    assert json_format.remove_empty_keys(test_dict) == {"b": 2}
+    test_dict = {'a': '', 'b': 2}
+    assert json_format.remove_empty_keys(test_dict) == {'b': 2}
 
 
 def test_redact_credentials():
@@ -151,24 +147,25 @@
 
 
 class TestCheckEnv(unittest.TestCase):
+
     def test_environment_field(self):
         """Test check field"""
-        result = check_env.check("PARAM", "param")
-        self.assertEqual(result, "param")
+        result = check_env.check('PARAM', 'param')
+        self.assertEqual(result, 'param')
 
-    @mock.patch.dict(os.environ, {"PARAM": "env_param"})
+    @mock.patch.dict(os.environ, {'PARAM': 'env_param'})
     def test_environment_env(self):
         """Test check env"""
-        result = check_env.check("PARAM", None)
-        self.assertEqual(result, "env_param")
+        result = check_env.check('PARAM', None)
+        self.assertEqual(result, 'env_param')
 
-    @mock.patch.dict(os.environ, {"PARAM": "env_param"})
+    @mock.patch.dict(os.environ, {'PARAM': 'env_param'})
     def test_environment_field_env(self):
         """Test check field with env and field"""
-        result = check_env.check("PARAM", "param")
-        self.assertEqual(result, "param")
+        result = check_env.check('PARAM', 'param')
+        self.assertEqual(result, 'param')
 
     def test_envrionment_error(self):
         """Test check env raises error"""
         with self.assertRaises(KeyError) as _:
-            check_env.check("PARAM", None)
+            check_env.check('PARAM', None)
diff --git a/test/test_van/responses_people.py b/test/test_van/responses_people.py
index db6299fda7..66b627a424 100644
--- a/test/test_van/responses_people.py
+++ b/test/test_van/responses_people.py
@@ -1,156 +1,154 @@
+
 find_people_response = {
-    "vanId": 19722445,
-    "firstName": "Bob",
-    "lastName": "Smith",
-    "middleName": None,
-    "suffix": None,
-    "title": None,
-    "contactMode": None,
-    "organizationContactCommonName": None,
-    "organizationContactOfficialName": None,
-    "salutation": "Bob",
-    "formalSalutation": "Bob Smith",
-    "additionalSalutation": None,
-    "preferredPronoun": None,
-    "envelopeName": "Bob Smith",
-    "formalEnvelopeName": "Bob Smith",
-    "additionalEnvelopeName": None,
-    "contactMethodPreferenceCode": None,
-    "nickname": None,
-    "website": None,
-    "professionalSuffix": None,
-    "party": None,
-    "employer": None,
-    "occupation": None,
-    "sex": "F",
-    "dateOfBirth": "1975-09-18T00:00:00Z",
-    "selfReportedRace": None,
-    "selfReportedEthnicity": None,
-    "selfReportedRaces": None,
-    "selfReportedEthnicities": None,
- "selfReportedGenders": None, - "selfReportedSexualOrientations": None, - "selfReportedLanguagePreference": None, - "emails": None, - "phones": None, - "addresses": None, - "recordedAddresses": None, - "identifiers": None, - "codes": None, - "customFields": None, - "contributionSummary": None, - "suppressions": None, - "caseworkCases": None, - "caseworkIssues": None, - "caseworkStories": None, - "notes": None, - "scores": None, - "customProperties": None, - "electionRecords": None, - "membershipStatus": None, - "organizationRoles": None, - "districts": None, + 'vanId': 19722445, + 'firstName': 'Bob', + 'lastName': 'Smith', + 'middleName': None, + 'suffix': None, + 'title': None, + 'contactMode': None, + 'organizationContactCommonName': None, + 'organizationContactOfficialName': None, + 'salutation': 'Bob', + 'formalSalutation': 'Bob Smith', + 'additionalSalutation': None, + 'preferredPronoun': None, + 'envelopeName': 'Bob Smith', + 'formalEnvelopeName': 'Bob Smith', + 'additionalEnvelopeName': None, + 'contactMethodPreferenceCode': None, + 'nickname': None, + 'website': None, + 'professionalSuffix': None, + 'party': None, + 'employer': None, + 'occupation': None, + 'sex': 'F', + 'dateOfBirth': '1975-09-18T00:00:00Z', + 'selfReportedRace': None, + 'selfReportedEthnicity': None, + 'selfReportedRaces': None, + 'selfReportedEthnicities': None, + 'selfReportedGenders': None, + 'selfReportedSexualOrientations': None, + 'selfReportedLanguagePreference': None, + 'emails': None, + 'phones': None, + 'addresses': None, + 'recordedAddresses': None, + 'identifiers': None, + 'codes': None, + 'customFields': None, + 'contributionSummary': None, + 'suppressions': None, + 'caseworkCases': None, + 'caseworkIssues': None, + 'caseworkStories': None, + 'notes': None, + 'scores': None, + 'customProperties': None, + 'electionRecords': None, + 'membershipStatus': None, + 'organizationRoles': None, + 'districts': None } get_person_response = { - "vanId": 19722445, - "firstName": "Bob", - "lastName": "Smith", - "middleName": None, - "suffix": None, - "title": None, - "contactMode": None, - "organizationContactCommonName": None, - "organizationContactOfficialName": None, - "salutation": "Bob", - "formalSalutation": "Bob Smith", - "additionalSalutation": None, - "preferredPronoun": None, - "envelopeName": "Bob Smith", - "formalEnvelopeName": "Bob Smith", - "additionalEnvelopeName": None, - "contactMethodPreferenceCode": None, - "nickname": None, - "website": None, - "professionalSuffix": None, - "party": None, - "employer": None, - "occupation": None, - "sex": "F", - "dateOfBirth": "1975-09-18T00:00:00Z", - "selfReportedRace": None, - "selfReportedEthnicity": None, - "selfReportedRaces": None, - "selfReportedEthnicities": None, - "selfReportedGenders": None, - "selfReportedSexualOrientations": None, - "selfReportedLanguagePreference": None, - "emails": [], - "phones": [ - { - "phoneId": 15406767, - "phoneNumber": "4142020792", - "ext": None, - "dateCreated": "2019-01-11T12:19:00Z", - "phoneType": "Cell", - "isPreferred": True, - "smsOptInStatus": "Unknown", - "phoneOptInStatus": "Unknown", - "isCellStatus": {"statusId": 1, "statusName": "Verified Cell"}, + 'vanId': 19722445, + 'firstName': 'Bob', + 'lastName': 'Smith', + 'middleName': None, + 'suffix': None, + 'title': None, + 'contactMode': None, + 'organizationContactCommonName': None, + 'organizationContactOfficialName': None, + 'salutation': 'Bob', + 'formalSalutation': 'Bob Smith', + 'additionalSalutation': None, + 'preferredPronoun': None, + 'envelopeName': 
'Bob Smith', + 'formalEnvelopeName': 'Bob Smith', + 'additionalEnvelopeName': None, + 'contactMethodPreferenceCode': None, + 'nickname': None, + 'website': None, + 'professionalSuffix': None, + 'party': None, + 'employer': None, + 'occupation': None, + 'sex': 'F', + 'dateOfBirth': '1975-09-18T00:00:00Z', + 'selfReportedRace': None, + 'selfReportedEthnicity': None, + 'selfReportedRaces': None, + 'selfReportedEthnicities': None, + 'selfReportedGenders': None, + 'selfReportedSexualOrientations': None, + 'selfReportedLanguagePreference': None, + 'emails': [], + 'phones': [{ + 'phoneId': 15406767, + 'phoneNumber': '4142020792', + 'ext': None, + 'dateCreated': '2019-01-11T12:19:00Z', + 'phoneType': 'Cell', + 'isPreferred': True, + 'smsOptInStatus': 'Unknown', + 'phoneOptInStatus': 'Unknown', + 'isCellStatus': { + 'statusId': 1, + 'statusName': 'Verified Cell' } - ], - "addresses": [ - { - "addressId": None, - "addressLine1": "900 N Washtenaw Ave", - "addressLine2": None, - "addressLine3": None, - "city": "Chicago", - "stateOrProvince": "IL", - "zipOrPostalCode": "60622-4455", - "geoLocation": None, - "countryCode": "US", - "preview": "900 N Washtenaw Ave ", - "type": "Voting", - "isPreferred": True, - "streetAddress": "900 N Washtenaw Ave", - "displayMode": "Standardized", - }, - { - "addressId": None, - "addressLine1": "900 N Washtenaw Ave", - "addressLine2": None, - "addressLine3": None, - "city": "Chicago", - "stateOrProvince": "IL", - "zipOrPostalCode": "60622-4455", - "geoLocation": None, - "countryCode": "US", - "preview": "900 N Washtenaw Ave ", - "type": "Mailing", - "isPreferred": None, - "streetAddress": "900 N Washtenaw Ave", - "displayMode": "Standardized", - }, - ], - "recordedAddresses": [], - "identifiers": [{"type": "StateFileID", "externalId": "KLYW682Z"}], - "codes": None, - "customFields": [], - "contributionSummary": None, - "suppressions": [], - "caseworkCases": None, - "caseworkIssues": None, - "caseworkStories": None, - "notes": [{"id": 1, "isViewRestricted": True, "text": "a"}], - "scores": None, - "customProperties": None, - "electionRecords": None, - "membershipStatus": None, - "organizationRoles": None, - "districts": None, + }], + 'addresses': [{ + 'addressId': None, + 'addressLine1': '900 N Washtenaw Ave', + 'addressLine2': None, + 'addressLine3': None, + 'city': 'Chicago', + 'stateOrProvince': 'IL', + 'zipOrPostalCode': '60622-4455', + 'geoLocation': None, + 'countryCode': 'US', + 'preview': '900 N Washtenaw Ave ', + 'type': 'Voting', + 'isPreferred': True, + 'streetAddress': '900 N Washtenaw Ave', + 'displayMode': 'Standardized' + }, { + 'addressId': None, + 'addressLine1': '900 N Washtenaw Ave', + 'addressLine2': None, + 'addressLine3': None, + 'city': 'Chicago', + 'stateOrProvince': 'IL', + 'zipOrPostalCode': '60622-4455', + 'geoLocation': None, + 'countryCode': 'US', + 'preview': '900 N Washtenaw Ave ', + 'type': 'Mailing', + 'isPreferred': None, + 'streetAddress': '900 N Washtenaw Ave', + 'displayMode': 'Standardized' + }], + 'recordedAddresses': [], + 'identifiers': [{ + 'type': 'StateFileID', + 'externalId': 'KLYW682Z' + }], + 'codes': None, + 'customFields': [], + 'contributionSummary': None, + 'suppressions': [], + 'caseworkCases': None, + 'caseworkIssues': None, + 'caseworkStories': None, + 'notes': None, + 'scores': None, + 'customProperties': None, + 'electionRecords': None, + 'membershipStatus': None, + 'organizationRoles': None, + 'districts': None } - -merge_contacts_response = {"vanId": 56789} - -delete_person_response = {} diff --git 
a/test/test_van/responses_printed_lists.py b/test/test_van/responses_printed_lists.py deleted file mode 100644 index 9bf31e56a9..0000000000 --- a/test/test_van/responses_printed_lists.py +++ /dev/null @@ -1,93 +0,0 @@ -list_json = [ - { - "number": "43-0000", - "name": "Precinct 01 Turf 01", - "eventSignups": None, - "listSize": 62, - }, - { - "number": "44-0000", - "name": "Precinct 01 Turf 02", - "eventSignups": None, - "listSize": 54, - }, - { - "number": "45-0000", - "name": "Precinct 01 Turf 03", - "eventSignups": None, - "listSize": 70, - }, - { - "number": "46-0000", - "name": "Precinct 01 Turf 04", - "eventSignups": None, - "listSize": 66, - }, - { - "number": "48-0000", - "name": "Precinct 02 Turf 01", - "eventSignups": None, - "listSize": 45, - }, - { - "number": "49-0000", - "name": "Precinct 02 Turf 02", - "eventSignups": None, - "listSize": 33, - }, - { - "number": "50-0000", - "name": "Precinct 02 Turf 03", - "eventSignups": None, - "listSize": 63, - }, - { - "number": "51-0000", - "name": "Precinct 02 Turf 04", - "eventSignups": None, - "listSize": 66, - }, - { - "number": "52-0000", - "name": "Precinct 02 Turf 05", - "eventSignups": None, - "listSize": 39, - }, - { - "number": "55-0000", - "name": "Precinct 03 Turf 01", - "eventSignups": None, - "listSize": 53, - }, - { - "number": "56-0000", - "name": "Precinct 03 Turf 02", - "eventSignups": None, - "listSize": 45, - }, - { - "number": "57-0000", - "name": "Precinct 03 Turf 03", - "eventSignups": None, - "listSize": 50, - }, - { - "number": "58-0000", - "name": "Precinct 03 Turf 04", - "eventSignups": None, - "listSize": 54, - }, - { - "number": "59-0000", - "name": "Precinct 03 Turf 05", - "eventSignups": None, - "listSize": 65, - }, -] - -single_list_json = { - "number": "43-0000", - "name": "Precinct 01 Turf 01", - "eventSignups": [], - "listSize": 62, -} diff --git a/test/test_van/test_activist_codes.py b/test/test_van/test_activist_codes.py index 02bfeecb64..c02b76b89d 100644 --- a/test/test_van/test_activist_codes.py +++ b/test/test_van/test_activist_codes.py @@ -1,16 +1,17 @@ import unittest import os import requests_mock -from parsons import VAN +from parsons.ngpvan import VAN from test.utils import validate_list -os.environ["VAN_API_KEY"] = "SOME_KEY" +os.environ['VAN_API_KEY'] = 'SOME_KEY' class TestActivistCodes(unittest.TestCase): + def setUp(self): - self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False) + self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False) def tearDown(self): @@ -20,36 +21,22 @@ def tearDown(self): def test_get_activist_codes(self, m): # Create response - json = { - "count": 43, - "items": [ - { - "status": "Active", - "scriptQuestion": None, - "name": "TEST CODE", - "mediumName": "TEST CODE", - "activistCodeId": 4388538, - "shortName": "TC", - "type": "Action", - "description": None, - } - ], - "nextPageLink": None, - } - - m.get(self.van.connection.uri + "activistCodes", json=json) + json = {u'count': 43, u'items': + [{u'status': u'Active', + u'scriptQuestion': None, + u'name': u'TEST CODE', + u'mediumName': u'TEST CODE', + u'activistCodeId': 4388538, + u'shortName': u'TC', + u'type': u'Action', + u'description': None}], + u'nextPageLink': None} + + m.get(self.van.connection.uri + 'activistCodes', json=json) # Expected Structure - expected = [ - "status", - "scriptQuestion", - "name", - "mediumName", - "activistCodeId", - "shortName", - "type", - "description", - ] + expected = ['status', 'scriptQuestion', 'name', 'mediumName', + 
+                    'activistCodeId', 'shortName', 'type', 'description']
 
         # Assert response is expected structure
         self.assertTrue(validate_list(expected, self.van.get_activist_codes()))
 
@@ -60,18 +47,16 @@ def test_get_activist_codes(self, m):
     def test_get_activist_code(self, m):
 
         # Create response
-        json = {
-            "status": "Active",
-            "scriptQuestion": "null",
-            "name": "Anti-Choice",
-            "mediumName": "Anti",
-            "activistCodeId": 4135099,
-            "shortName": "AC",
-            "type": "Constituency",
-            "description": "A person who has been flagged as anti-choice.",
-        }
-
-        m.get(self.van.connection.uri + "activistCodes/4388538", json=json)
+        json = {"status": "Active",
+                "scriptQuestion": "null",
+                "name": "Anti-Choice",
+                "mediumName": "Anti",
+                "activistCodeId": 4135099,
+                "shortName": "AC",
+                "type": "Constituency",
+                "description": "A person who has been flagged as anti-choice."}
+
+        m.get(self.van.connection.uri + 'activistCodes/4388538', json=json)
 
         self.assertEqual(json, self.van.get_activist_code(4388538))
 
@@ -79,31 +64,23 @@ def test_get_activist_code(self, m):
     def test_toggle_activist_code(self, m):
 
         # Test apply activist code
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=204
-        )
-        self.assertTrue(self.van.toggle_activist_code(2335282, 4429154, "apply"), 204)
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=204)
+        self.assertTrue(self.van.toggle_activist_code(2335282, 4429154, 'apply'), 204)
 
         # Test remove activist code
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=204
-        )
-        self.assertTrue(self.van.toggle_activist_code(2335282, 4429154, "remove"), 204)
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=204)
+        self.assertTrue(self.van.toggle_activist_code(2335282, 4429154, 'remove'), 204)
 
     @requests_mock.Mocker()
     def test_apply_activist_code(self, m):
 
         # Test apply activist code
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=204
-        )
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=204)
         self.assertEqual(self.van.apply_activist_code(2335282, 4429154), 204)
 
     @requests_mock.Mocker()
     def test_remove_activist_code(self, m):
 
         # Test remove activist code
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=204
-        )
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=204)
         self.assertEqual(self.van.remove_activist_code(2335282, 4429154), 204)
diff --git a/test/test_van/test_bulkimport.py b/test/test_van/test_bulkimport.py
index 23e4b44eae..671c8c67f6 100644
--- a/test/test_van/test_bulkimport.py
+++ b/test/test_van/test_bulkimport.py
@@ -2,105 +2,92 @@
 import os
 import requests_mock
 import unittest.mock as mock
-from parsons import VAN, Table
+from parsons.ngpvan.van import VAN
+from parsons.etl.table import Table
 from test.utils import assert_matching_tables
 from parsons.utilities import cloud_storage
 
-os.environ["VAN_API_KEY"] = "SOME_KEY"
+os.environ['VAN_API_KEY'] = 'SOME_KEY'
 
 
 class TestBulkImport(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
+        self.van = VAN(os.environ['VAN_API_KEY'],
+                       db="MyVoters", raise_for_status=False)
 
     @requests_mock.Mocker()
     def test_get_bulk_import_resources(self, m):
 
-        json = ["Contacts", "Contributions", "ActivistCodes", "ContactsActivistCodes"]
+        json = ['Contacts', 'Contributions',
+                'ActivistCodes', 'ContactsActivistCodes']
 
-        m.get(self.van.connection.uri + "bulkImportJobs/resources", json=json)
+        m.get(self.van.connection.uri + 'bulkImportJobs/resources', json=json)
 
         self.assertEqual(self.van.get_bulk_import_resources(), json)
 
     @requests_mock.Mocker()
     def test_get_bulk_import_job(self, m):
 
-        m.get(self.van.connection.uri + "bulkImportJobs/53407", json=bulk_import_job)
+        m.get(self.van.connection.uri +
+              'bulkImportJobs/53407', json=bulk_import_job)
 
         self.assertEqual(self.van.get_bulk_import_job(53407), bulk_import_job)
 
     @requests_mock.Mocker()
     def test_get_bulk_import_job_results(self, m):
 
-        results_tbl = Table(
-            [
-                [
-                    "BulkUploadDataID",
-                    "ULFileID",
-                    "PrimaryKey",
-                    "PrimaryKeyType",
-                    "MailingAddress_3581",
-                ],
-                ["1", "1983", "101596008", "VanID", "Processed"],
-            ]
-        )
-
-        bulk_import_job = {
-            "id": 92,
-            "status": "Completed",
-            "resourceType": "Contacts",
-            "webhookUrl": None,
-            "resultFileSizeLimitKb": 5000,
-            "errors": [],
-            "resultFiles": [
-                {
-                    "url": Table.to_csv(results_tbl),
-                    "dateExpired": "2020-09-04T22:07:04.0770295-04:00",
-                }
-            ],
-        }
-
-        m.get(self.van.connection.uri + "bulkImportJobs/53407", json=bulk_import_job)
-        assert_matching_tables(self.van.get_bulk_import_job_results(53407), results_tbl)
+        results_tbl = Table([['BulkUploadDataID', 'ULFileID', 'PrimaryKey',
+                              'PrimaryKeyType', 'MailingAddress_3581'],
+                             ['1', '1983', '101596008', 'VanID', 'Processed']])
+
+        bulk_import_job = {'id': 92,
+                           'status': 'Completed',
+                           'resourceType': 'Contacts',
+                           'webhookUrl': None,
+                           'resultFileSizeLimitKb': 5000,
+                           'errors': [],
+                           'resultFiles': [{
+                               'url': Table.to_csv(results_tbl),
+                               'dateExpired': '2020-09-04T22:07:04.0770295-04:00'
+                           }]
+                           }
+
+        m.get(self.van.connection.uri +
+              'bulkImportJobs/53407', json=bulk_import_job)
+        assert_matching_tables(
+            self.van.get_bulk_import_job_results(53407), results_tbl)
 
     @requests_mock.Mocker()
     def test_get_bulk_import_mapping_types(self, m):
 
-        m.get(self.van.connection.uri + "bulkImportMappingTypes", json=mapping_type)
+        m.get(self.van.connection.uri +
+              'bulkImportMappingTypes', json=mapping_type)
 
         assert_matching_tables(
-            self.van.get_bulk_import_mapping_types(), Table(mapping_type)
-        )
+            self.van.get_bulk_import_mapping_types(), Table(mapping_type))
 
     @requests_mock.Mocker()
     def test_get_bulk_import_mapping_type(self, m):
 
-        m.get(
-            self.van.connection.uri + "bulkImportMappingTypes/ActivistCode",
-            json=mapping_type,
-        )
+        m.get(self.van.connection.uri +
+              'bulkImportMappingTypes/ActivistCode', json=mapping_type)
 
-        self.assertEqual(
-            self.van.get_bulk_import_mapping_type("ActivistCode"), mapping_type
-        )
+        self.assertEqual(self.van.get_bulk_import_mapping_type(
+            'ActivistCode'), mapping_type)
 
     @requests_mock.Mocker()
     def get_bulk_import_mapping_type_fields(self, m):
 
-        json = [
-            {"name": "Unsubscribed", "id": "0", "parents": None},
-            {"name": "Not Subscribed", "id": "1", "parents": None},
-            {"name": "Subscribed", "id": "2", "parents": None},
-        ]
-        m.get(
-            self.van.connection.uri
-            + "bulkImportMappingTypes/Email/EmailSubscriptionStatusId/values"
-        )
+        json = [{'name': 'Unsubscribed', 'id': '0', 'parents': None},
+                {'name': 'Not Subscribed', 'id': '1', 'parents': None},
+                {'name': 'Subscribed', 'id': '2', 'parents': None}]
+        m.get(self.van.connection.uri +
+              'bulkImportMappingTypes/Email/EmailSubscriptionStatusId/values')
 
         r = self.van.get_bulk_import_mapping_type_fields(
-            "Email", "EmailSubscriptionStatusId"
-        )
+            'Email', 'EmailSubscriptionStatusId')
         self.assertEqual(json, r)
 
     @requests_mock.Mocker()
@@ -108,20 +95,19 @@ def test_post_bulk_import(self, m):
 
         # Mock Cloud Storage
         cloud_storage.post_file = mock.MagicMock()
-        cloud_storage.post_file.return_value = "https://s3.com/my_file.zip"
+        cloud_storage.post_file.return_value = 'https://s3.com/my_file.zip'
 
-        tbl = Table([["Vanid", "ActivistCodeID"], [1234, 345345]])
+        tbl = Table([['Vanid', 'ActivistCodeID'], [1234, 345345]])
 
-        m.post(self.van.connection.uri + "bulkImportJobs", json={"jobId": 54679})
+        m.post(self.van.connection.uri +
+               'bulkImportJobs', json={'jobId': 54679})
 
-        r = self.van.post_bulk_import(
-            tbl,
-            "S3",
-            "ContactsActivistCodes",
-            [{"name": "ActivistCode"}],
-            "Activist Code Upload",
-            bucket="my-bucket",
-        )
+        r = self.van.post_bulk_import(tbl,
+                                      'S3',
+                                      'ContactsActivistCodes',
+                                      [{"name": "ActivistCode"}],
+                                      'Activist Code Upload',
+                                      bucket='my-bucket')
 
         self.assertEqual(r, 54679)
 
@@ -130,111 +116,88 @@ def test_bulk_apply_activist_codes(self, m):
 
         # Mock Cloud Storage
         cloud_storage.post_file = mock.MagicMock()
-        cloud_storage.post_file.return_value = "https://s3.com/my_file.zip"
+        cloud_storage.post_file.return_value = 'https://s3.com/my_file.zip'
 
-        tbl = Table([["Vanid", "ActivistCodeID"], [1234, 345345]])
+        tbl = Table([['Vanid', 'ActivistCodeID'], [1234, 345345]])
 
-        m.post(self.van.connection.uri + "bulkImportJobs", json={"jobId": 54679})
+        m.post(self.van.connection.uri +
+               'bulkImportJobs', json={'jobId': 54679})
 
         job_id = self.van.bulk_apply_activist_codes(
-            tbl, url_type="S3", bucket="my-bucket"
-        )
+            tbl, url_type="S3", bucket='my-bucket')
 
         self.assertEqual(job_id, 54679)
 
     @requests_mock.Mocker()
-    def test_bulk_apply_suppressions(self, m):
+    def test_bulk_upsert_contacts(self, m):
 
         # Mock Cloud Storage
         cloud_storage.post_file = mock.MagicMock()
-        cloud_storage.post_file.return_value = "https://s3.com/my_file.zip"
+        cloud_storage.post_file.return_value = 'https://s3.com/my_file.zip'
 
-        tbl = Table([["Vanid", "suppressionid"], [1234, 18]])
+        tbl = Table([['Vanid', 'email'], [1234, 'me@me.com']])
 
-        m.post(self.van.connection.uri + "bulkImportJobs", json={"jobId": 54679})
+        m.post(self.van.connection.uri +
+               'bulkImportJobs', json={'jobId': 54679})
 
-        job_id = self.van.bulk_apply_suppressions(
-            tbl, url_type="S3", bucket="my-bucket"
-        )
+        job_id = self.van.bulk_upsert_contacts(
+            tbl, url_type="S3", bucket='my-bucket')
 
         self.assertEqual(job_id, 54679)
 
-    @requests_mock.Mocker()
-    def test_bulk_upsert_contacts(self, m):
-
-        # Mock Cloud Storage
-        cloud_storage.post_file = mock.MagicMock()
-        cloud_storage.post_file.return_value = "https://s3.com/my_file.zip"
-
-        tbl = Table([["Vanid", "email"], [1234, "me@me.com"]])
-
-        m.post(self.van.connection.uri + "bulkImportJobs", json={"jobId": 54679})
-
-        job_id = self.van.bulk_upsert_contacts(tbl, url_type="S3", bucket="my-bucket")
-
-        self.assertEqual(job_id, 54679)
 
+mapping_type = {'name': 'ActivistCode',
+                'displayName': 'Apply Activist Code',
+                'allowMultipleMode': 'Multiple',
+                'resourceTypes': ['Contacts', 'ContactsActivistCodes'],
+                'fields': [{
+                    'name': 'ActivistCodeID',
+                    'description': 'Activist Code ID',
+                    'hasPredefinedValues': True,
+                    'isRequired': True,
+                    'canBeMappedToColumn': True,
+                    'canBeMappedByName': True,
+                    'parents': None},
+                    {'name': 'CanvassedBy',
+                     'description': 'Recruited By, Must be a Valid User ID',
+                     'hasPredefinedValues': False,
+                     'isRequired': False,
+                     'canBeMappedToColumn': True,
+                     'canBeMappedByName': True,
+                     'parents': None},
+                    {'name': 'DateCanvassed',
+                     'description': 'Contacted When',
+                     'hasPredefinedValues': False,
+                     'isRequired': False,
+                     'canBeMappedToColumn': True,
+                     'canBeMappedByName': True,
+                     'parents': [{
+                         'parentFieldName': 'CanvassedBy',
+                         'limitedToParentValues': None
+                     }]
+                     }, {
+                        'name': 'ContactTypeID',
+                        'description': 'Contacted How',
+                        'hasPredefinedValues': True,
+                        'isRequired': False,
+                        'canBeMappedToColumn': True,
+                        'canBeMappedByName': True,
+                        'parents': [{
+                            'parentFieldName': 'CanvassedBy',
+                            'limitedToParentValues': None
+                        }]
+                }]
+                }
 
-mapping_type = {
-    "name": "ActivistCode",
-    "displayName": "Apply Activist Code",
-    "allowMultipleMode": "Multiple",
-    "resourceTypes": ["Contacts", "ContactsActivistCodes"],
-    "fields": [
-        {
-            "name": "ActivistCodeID",
-            "description": "Activist Code ID",
-            "hasPredefinedValues": True,
-            "isRequired": True,
-            "canBeMappedToColumn": True,
-            "canBeMappedByName": True,
-            "parents": None,
-        },
-        {
-            "name": "CanvassedBy",
-            "description": "Recruited By, Must be a Valid User ID",
-            "hasPredefinedValues": False,
-            "isRequired": False,
-            "canBeMappedToColumn": True,
-            "canBeMappedByName": True,
-            "parents": None,
-        },
-        {
-            "name": "DateCanvassed",
-            "description": "Contacted When",
-            "hasPredefinedValues": False,
-            "isRequired": False,
-            "canBeMappedToColumn": True,
-            "canBeMappedByName": True,
-            "parents": [
-                {"parentFieldName": "CanvassedBy", "limitedToParentValues": None}
-            ],
-        },
-        {
-            "name": "ContactTypeID",
-            "description": "Contacted How",
-            "hasPredefinedValues": True,
-            "isRequired": False,
-            "canBeMappedToColumn": True,
-            "canBeMappedByName": True,
-            "parents": [
-                {"parentFieldName": "CanvassedBy", "limitedToParentValues": None}
-            ],
-        },
-    ],
-}
-
-bulk_import_job = {
-    "id": 92,
-    "status": "Completed",
-    "resourceType": "Contacts",
-    "webhookUrl": None,
-    "resultFileSizeLimitKb": 5000,
-    "errors": [],
-    "resultFiles": [
-        {
-            "url": "https://ngpvan.com/bulk-import-jobs/f023.csv",
-            "dateExpired": "2020-09-04T22:07:04.0770295-04:00",
-        }
-    ],
-}
+bulk_import_job = {'id': 92,
+                   'status': 'Completed',
+                   'resourceType': 'Contacts',
+                   'webhookUrl': None,
+                   'resultFileSizeLimitKb': 5000,
+                   'errors': [],
+                   'resultFiles': [{
+                       'url': 'https://ngpvan.com/bulk-import-jobs/f023.csv',
+                       'dateExpired': '2020-09-04T22:07:04.0770295-04:00'
+                   }]
+                   }
diff --git a/test/test_van/test_changed_entities.py b/test/test_van/test_changed_entities.py
index 8eb92be3bd..01a77a6cc7 100644
--- a/test/test_van/test_changed_entities.py
+++ b/test/test_van/test_changed_entities.py
@@ -2,97 +2,84 @@
 import os
 import requests_mock
 import unittest.mock as mock
-from parsons import VAN, Table
+from parsons.ngpvan.van import VAN
+from parsons.etl.table import Table
 from test.utils import assert_matching_tables
 
 
 class TestNGPVAN(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
+        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False)
 
     @requests_mock.Mocker()
     def test_get_changed_entity_resources(self, m):
 
-        json = ["ActivistCodes", "ContactHistory", "Contacts", "ContactsActivistCodes"]
-        m.get(self.van.connection.uri + "changedEntityExportJobs/resources", json=json)
+        json = ['ActivistCodes', 'ContactHistory', 'Contacts', 'ContactsActivistCodes']
+        m.get(self.van.connection.uri + 'changedEntityExportJobs/resources', json=json)
         self.assertEqual(json, self.van.get_changed_entity_resources())
 
     @requests_mock.Mocker()
     def test_get_changed_entity_resource_fields(self, m):
 
-        json = [
-            {
-                "fieldName": "ActivistCodeID",
-                "fieldType": "N",
-                "maxTextboxCharacters": None,
-                "isCoreField": True,
-                "availableValues": None,
-            },
-            {
-                "fieldName": "ActivistCodeType",
-                "fieldType": "T",
-                "maxTextboxCharacters": 20,
-                "isCoreField": True,
-                "availableValues": None,
-            },
-            {
-                "fieldName": "Campaign",
-                "fieldType": "T",
-                "maxTextboxCharacters": 150,
-                "isCoreField": True,
-                "availableValues": None,
-            },
-        ]
-
-        m.get(
-            self.van.connection.uri + "changedEntityExportJobs/fields/ActivistCodes",
-            json=json,
-        )
+        json = [{
+            'fieldName': 'ActivistCodeID',
+            'fieldType': 'N',
+            'maxTextboxCharacters': None,
+            'isCoreField': True,
+            'availableValues': None
+        }, {
+            'fieldName': 'ActivistCodeType',
+            'fieldType': 'T',
+            'maxTextboxCharacters': 20,
+            'isCoreField': True,
+            'availableValues': None
+        }, {
+            'fieldName': 'Campaign',
+            'fieldType': 'T',
+            'maxTextboxCharacters': 150,
+            'isCoreField': True,
+            'availableValues': None
+        }]
+
+        m.get(self.van.connection.uri + 'changedEntityExportJobs/fields/ActivistCodes', json=json)
         assert_matching_tables(
-            Table(json), self.van.get_changed_entity_resource_fields("ActivistCodes")
-        )
+            Table(json), self.van.get_changed_entity_resource_fields('ActivistCodes'))
 
     @requests_mock.Mocker()
     def test_get_changed_entities(self, m):
 
-        json = {
-            "dateChangedFrom": "2021-10-10T00:00:00-04:00",
-            "dateChangedTo": "2021-10-11T00:00:00-04:00",
-            "files": [],
-            "message": "Created export job",
-            "code": None,
-            "exportedRecordCount": 0,
-            "exportJobId": 2170181229,
-            "jobStatus": "Pending",
-        }
+        json = {"dateChangedFrom": "2021-10-10T00:00:00-04:00",
+                "dateChangedTo": "2021-10-11T00:00:00-04:00",
+                "files": [],
+                "message": "Created export job",
+                "code": None,
+                "exportedRecordCount": 0,
+                "exportJobId": 2170181229,
+                "jobStatus": "Pending"}
 
         json2 = {
-            "dateChangedFrom": "2021-10-10T00:00:00-04:00",
-            "dateChangedTo": "2021-10-11T00:00:00-04:00",
-            "files": [
-                {
-                    "downloadUrl": "https://box.com/file.csv",
-                    "dateExpired": "2021-11-03T15:27:01.8687339-04:00",
-                }
-            ],
-            "message": "Finished processing export job",
-            "code": None,
-            "exportedRecordCount": 6110,
-            "exportJobId": 2170181229,
-            "jobStatus": "Complete",
-        }
-
-        tbl = Table([{"a": 1, "b": 2}])
-
-        m.post(self.van.connection.uri + "changedEntityExportJobs", json=json)
-        m.get(
-            self.van.connection.uri + "changedEntityExportJobs/2170181229", json=json2
-        )
+            "dateChangedFrom": "2021-10-10T00:00:00-04:00",
+            "dateChangedTo": "2021-10-11T00:00:00-04:00",
+            "files": [
+                {"downloadUrl": "https://box.com/file.csv",
+                 "dateExpired": "2021-11-03T15:27:01.8687339-04:00"}
+            ],
+            "message": "Finished processing export job",
+            "code": None,
+            "exportedRecordCount": 6110,
+            "exportJobId": 2170181229,
+            "jobStatus": "Complete"}
+
+        tbl = Table([{'a': 1, 'b': 2}])
+
+        m.post(self.van.connection.uri + 'changedEntityExportJobs', json=json)
+        m.get(self.van.connection.uri + 'changedEntityExportJobs/2170181229', json=json2)
 
         Table.from_csv = mock.MagicMock()
         Table.from_csv.return_value = tbl
 
-        out_tbl = self.van.get_changed_entities("ContactHistory", "2021-10-10")
+        out_tbl = self.van.get_changed_entities('ContactHistory', '2021-10-10')
         assert_matching_tables(out_tbl, tbl)
diff --git a/test/test_van/test_codes.py b/test/test_van/test_codes.py
index a875e9c71b..704b54f61d 100644
--- a/test/test_van/test_codes.py
+++ b/test/test_van/test_codes.py
@@ -1,17 +1,18 @@
 import unittest
 import os
 import requests_mock
-from parsons import VAN
+from parsons.ngpvan.van import VAN
 from test.utils import assert_matching_tables
 from requests.exceptions import HTTPError
 
-os.environ["VAN_API_KEY"] = "SOME_KEY"
+os.environ['VAN_API_KEY'] = 'SOME_KEY'
 
 
 class TestCodes(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters")
+        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters")
 
     def tearDown(self):
 
@@ -20,68 +21,38 @@ def tearDown(self):
     @requests_mock.Mocker()
     def test_get_codes(self, m):
 
-        json = {
-            "items": [
-                {
-                    "codeId": 1004916,
-                    "parentCodeId": None,
-                    "name": "Data Entry",
-                    "description": "for test.",
-                    "codePath": "Data Entry",
-                    "createdByName": "",
-                    "dateCreated": "2018-07-13T15:16:00Z",
-                    "supportedEntities": None,
-                    "codeType": "Tag",
-                    "campaign": None,
-                    "contactType": None,
-                }
-            ],
-            "nextPageLink": None,
-            "count": 8,
-        }
-
-        m.get(self.van.connection.uri + "codes", json=json)
-        assert_matching_tables(json["items"], self.van.get_codes())
-
-    @requests_mock.Mocker()
-    def test_get_code(self, m):
-
-        json = {
-            "codeId": 1004916,
-            "parentCodeId": None,
-            "name": "Data Entry",
-            "description": "for test.",
-            "codePath": "Data Entry",
-            "createdByName": "",
-            "dateCreated": "2018-07-13T15:16:00Z",
-            "supportedEntities": None,
-            "codeType": "Tag",
-            "campaign": None,
-            "contactType": None,
-        }
-
-        m.get(self.van.connection.uri + "codes/1004916", json=json)
-        self.assertEqual(json, self.van.get_code(1004916))
+        json = {'items': [{'codeId': 1004916,
+                           'parentCodeId': None,
+                           'name': 'Data Entry',
+                           'description': 'for test.',
+                           'codePath': 'Data Entry',
+                           'createdByName': '',
+                           'dateCreated': '2018-07-13T15:16:00Z',
+                           'supportedEntities': None,
+                           'codeType': 'Tag',
+                           'campaign': None,
+                           'contactType': None}],
+                'nextPageLink': None, 'count': 8}
+
+        m.get(self.van.connection.uri + 'codes', json=json)
+        assert_matching_tables(json['items'], self.van.get_codes())
 
     @requests_mock.Mocker()
     def test_get_code_types(self, m):
 
-        json = ["Tag", "SourceCode"]
-        m.get(self.van.connection.uri + "codeTypes", json=json)
+        json = ['Tag', 'SourceCode']
+        m.get(self.van.connection.uri + 'codeTypes', json=json)
 
         self.assertEqual(json, self.van.get_code_types())
 
     @requests_mock.Mocker()
     def test_create_code(self, m):
 
-        m.post(self.van.connection.uri + "codes", json=1004960, status_code=201)
+        m.post(self.van.connection.uri + 'codes', json=1004960, status_code=201)
 
         # Test that it doesn't throw and error
-        r = self.van.create_code(
-            "Test Code",
-            supported_entities=[
-                {"name": "Events", "is_searchable": True, "is_applicable": True}
-            ],
-        )
+        r = self.van.create_code('Test Code', supported_entities=[{'name': 'Events',
+                                                                   'is_searchable': True,
+                                                                   'is_applicable': True}])
 
         self.assertEqual(r, 1004960)
 
@@ -89,27 +60,27 @@ def test_create_code(self, m):
     def test_update_code(self, m):
 
         # Test a good input
-        m.put(self.van.connection.uri + "codes/1004960", status_code=204)
-        self.van.update_code(1004960, name="Test")
+        m.put(self.van.connection.uri + 'codes/1004960', status_code=204)
+        self.van.update_code(1004960, name='Test')
 
         # Test a bad input
-        m.put(self.van.connection.uri + "codes/100496Q", status_code=404)
-        self.assertRaises(HTTPError, self.van.update_code, "100496Q")
+        m.put(self.van.connection.uri + 'codes/100496Q', status_code=404)
+        self.assertRaises(HTTPError, self.van.update_code, '100496Q')
 
     @requests_mock.Mocker()
     def test_delete_code(self, m):
 
         # Test a good input
-        m.delete(self.van.connection.uri + "codes/1004960", status_code=204)
+        m.delete(self.van.connection.uri + 'codes/1004960', status_code=204)
         self.van.delete_code(1004960)
 
         # Test a bad input
-        m.delete(self.van.connection.uri + "codes/100496Q", status_code=404)
-        self.assertRaises(HTTPError, self.van.delete_code, "100496Q")
+        m.delete(self.van.connection.uri + 'codes/100496Q', status_code=404)
+        self.assertRaises(HTTPError, self.van.delete_code, '100496Q')
 
     @requests_mock.Mocker()
     def test_get_code_supported_entities(self, m):
 
-        json = ["Contacts", "Events", "Locations"]
-        m.get(self.van.connection.uri + "codes/supportedEntities", json=json)
+        json = ['Contacts', 'Events', 'Locations']
+        m.get(self.van.connection.uri + 'codes/supportedEntities', json=json)
 
         self.assertEqual(json, self.van.get_code_supported_entities())
diff --git a/test/test_van/test_contact_notes.py b/test/test_van/test_contact_notes.py
deleted file mode 100644
index 010aa6676b..0000000000
--- a/test/test_van/test_contact_notes.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from parsons import VAN
-from test.test_van.responses_people import get_person_response
-from test.utils import assert_matching_tables
-import requests_mock
-import os
-import unittest
-
-os.environ["VAN_API_KEY"] = "SOME_KEY"
-
-
-class TestNGPVAN(unittest.TestCase):
-    def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
-
-    @requests_mock.Mocker()
-    def test_create_contact_note(self, m):
-        m.post(self.van.connection.uri + "people/1/notes", status_code=204)
-        self.van.create_contact_note(1, "a", True)
-
-    @requests_mock.Mocker()
-    def test_get_contact_notes(self, m):
-        json = get_person_response["notes"]
-        m.get(self.van.connection.uri + "people/1/notes", json=json)
-        assert_matching_tables(json, self.van.get_contact_notes("1"))
diff --git a/test/test_van/test_custom_fields.py b/test/test_van/test_custom_fields.py
index db735e4003..f3536e6c5b 100644
--- a/test/test_van/test_custom_fields.py
+++ b/test/test_van/test_custom_fields.py
@@ -1,70 +1,57 @@
 import unittest
 import os
 import requests_mock
-from parsons import VAN
+from parsons.ngpvan.van import VAN
 from test.utils import assert_matching_tables
 
-custom_field = [
-    {
-        "customFieldId": 157,
-        "customFieldParentId": None,
-        "customFieldName": "Education level",
-        "customFieldGroupId": 52,
-        "customFieldGroupName": "Education",
-        "customFieldGroupType": "Contacts",
-        "customFieldTypeId": "S",
-        "isEditable": True,
-        "isExportable": False,
-        "maxTextboxCharacters": None,
-        "availableValues": [
-            {"id": 1, "name": "High School diploma", "parentValueId": None},
-            {"id": 2, "name": "College degree", "parentValueId": None},
-            {"id": 3, "name": "Postgraduate degree", "parentValueId": None},
-            {"id": 4, "name": "Doctorate", "parentValueId": None},
-        ],
-    }
-]
+custom_field = [{
+    "customFieldId": 157,
+    "customFieldParentId": None,
+    "customFieldName": "Education level",
+    "customFieldGroupId": 52,
+    "customFieldGroupName": "Education",
+    "customFieldGroupType": "Contacts",
+    "customFieldTypeId": "S",
+    "isEditable": True,
+    "isExportable": False,
+    "maxTextboxCharacters": None,
+    "availableValues": [
+        {"id": 1, "name": "High School diploma", "parentValueId": None},
+        {"id": 2, "name": "College degree", "parentValueId": None},
+        {"id": 3, "name": "Postgraduate degree", "parentValueId": None},
+        {"id": 4, "name": "Doctorate", "parentValueId": None}]}]
 
 custom_field_values = [
-    {
-        "customFieldId": 157,
-        "id": 1,
-        "name": "High School diploma",
-        "parentValueId": None,
-    },
-    {"customFieldId": 157, "id": 2, "name": "College degree", "parentValueId": None},
-    {
-        "customFieldId": 157,
-        "id": 3,
-        "name": "Postgraduate degree",
-        "parentValueId": None,
-    },
-    {"customFieldId": 157, "id": 4, "name": "Doctorate", "parentValueId": None},
-]
+    {'customFieldId': 157, 'id': 1, 'name': 'High School diploma', 'parentValueId': None},
+    {'customFieldId': 157, 'id': 2, 'name': 'College degree', 'parentValueId': None},
+    {'customFieldId': 157, 'id': 3, 'name': 'Postgraduate degree', 'parentValueId': None},
+    {'customFieldId': 157, 'id': 4, 'name': 'Doctorate', 'parentValueId': None}
+    ]
 
-os.environ["VAN_API_KEY"] = "SOME_KEY"
+os.environ['VAN_API_KEY'] = 'SOME_KEY'
 
 
 class TestCustomFields(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters")
+        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters")
 
     @requests_mock.Mocker()
     def test_get_custom_fields(self, m):
 
-        m.get(self.van.connection.uri + "customFields", json=custom_field)
+        m.get(self.van.connection.uri + 'customFields', json=custom_field)
 
         assert_matching_tables(custom_field, self.van.get_custom_fields())
 
     @requests_mock.Mocker()
     def test_get_custom_field_values(self, m):
 
-        m.get(self.van.connection.uri + "customFields", json=custom_field)
+        m.get(self.van.connection.uri + 'customFields', json=custom_field)
 
         assert_matching_tables(custom_field_values, self.van.get_custom_fields_values())
 
     @requests_mock.Mocker()
     def test_get_custom_field(self, m):
 
-        m.get(self.van.connection.uri + "customFields/157", json=custom_field)
+        m.get(self.van.connection.uri + 'customFields/157', json=custom_field)
 
         assert_matching_tables(custom_field, self.van.get_custom_field(157))
diff --git a/test/test_van/test_events.py b/test/test_van/test_events.py
index 9bd7cf6b03..0f51aa93e2 100644
--- a/test/test_van/test_events.py
+++ b/test/test_van/test_events.py
@@ -1,17 +1,18 @@
 import unittest
 import os
 import requests_mock
-from parsons import VAN
+from parsons.ngpvan.van import VAN
 from test.utils import validate_list
 
-os.environ["VAN_API_KEY"] = "SOME_KEY"
+os.environ['VAN_API_KEY'] = 'SOME_KEY'
 
 
 class TestNGPVAN(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters")
+        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters")
 
     def tearDown(self):
 
@@ -20,60 +21,57 @@ def tearDown(self):
     @requests_mock.Mocker()
     def test_get_events(self, m):
 
-        json = {
-            "count": 6,
-            "items": [
-                {
-                    "eventId": 1062,
-                    "startDate": "2010-05-25T11:00:00-05:00",
-                    "codes": "null",
-                    "endDate": "2010-05-25T15:00:00-05:00",
-                    "name": "Sample",
-                    "roles": "null",
-                    "isOnlyEditableByCreatingUser": "true",
-                    "ticketCategories": "null",
-                    "eventType": {"eventTypeId": 29166, "name": "Confirmation Calls"},
-                    "notes": "null",
-                    "districtFieldValue": "null",
-                    "locations": "null",
-                    "shifts": "null",
-                    "voterRegistrationBatches": "null",
-                    "createdDate": "2010-05-25T11:55:00Z",
-                    "financialProgram": "null",
-                    "shortName": "Sample",
-                    "isPubliclyViewable": "null",
-                    "isActive": "true",
-                    "description": "This is a sample",
-                }
-            ],
-            "nextPageLink": None,
-        }
-
-        m.get(self.van.connection.uri + "events", json=json)
+        json = {'count': 6, 'items': [
+            {"eventId": 1062,
+             "startDate": "2010-05-25T11:00:00-05:00",
+             "codes": "null",
+             "endDate": "2010-05-25T15:00:00-05:00",
+             "name": "Sample",
+             "roles": "null",
+             "isOnlyEditableByCreatingUser": "true",
+             "ticketCategories": "null",
+             "eventType": {
+                 "eventTypeId": 29166,
+                 "name": "Confirmation Calls"
+             },
+             "notes": "null",
+             "districtFieldValue": "null",
+             "locations": "null",
+             "shifts": "null",
+             "voterRegistrationBatches": "null",
+             "createdDate": "2010-05-25T11:55:00Z",
+             "financialProgram": "null",
+             "shortName": "Sample",
+             "isPubliclyViewable": "null",
+             "isActive": "true",
+             "description": "This is a sample"
+             }],
+            'nextPageLink': None}
+
+        m.get(self.van.connection.uri + 'events', json=json)
 
         # Expected Structure
         expected = [
-            "eventId",
-            "startDate",
-            "codes",
-            "endDate",
-            "name",
-            "roles",
-            "isOnlyEditableByCreatingUser",
-            "ticketCategories",
-            "eventType",
-            "notes",
-            "districtFieldValue",
-            "locations",
-            "shifts",
-            "voterRegistrationBatches",
-            "createdDate",
-            "financialProgram",
-            "shortName",
-            "isPubliclyViewable",
-            "isActive",
-            "description",
-        ]
+            'eventId',
+            'startDate',
+            'codes',
+            'endDate',
+            'name',
+            'roles',
+            'isOnlyEditableByCreatingUser',
+            'ticketCategories',
+            'eventType',
+            'notes',
+            'districtFieldValue',
+            'locations',
+            'shifts',
+            'voterRegistrationBatches',
+            'createdDate',
+            'financialProgram',
+            'shortName',
+            'isPubliclyViewable',
+            'isActive',
+            'description']
 
         self.assertTrue(validate_list(expected, self.van.get_events()))
 
@@ -82,110 +80,90 @@ def test_get_event(self, m):
 
         event_id = 1062
 
-        json = {
-            "eventId": 1062,
-            "startDate": "2010-05-25T11:00:00-05:00",
-            "codes": "null",
-            "endDate": "2010-05-25T15:00:00-05:00",
-            "name": "Sample",
-            "roles": "null",
-            "isOnlyEditableByCreatingUser": "true",
-            "ticketCategories": "null",
-            "eventType": {"eventTypeId": 29166, "name": "Confirmation Calls"},
-            "notes": "null",
-            "districtFieldValue": "null",
-            "locations": "null",
-            "shifts": "null",
-            "voterRegistrationBatches": "null",
-            "createdDate": "2010-05-25T11:55:00Z",
-            "financialProgram": "null",
-            "shortName": "Sample",
-            "isPubliclyViewable": "null",
-            "isActive": "true",
-            "description": "This is a sample",
-        }
-
-        m.get(self.van.connection.uri + "events/{}".format(event_id), json=json)
+        json = {"eventId": 1062,
+                "startDate": "2010-05-25T11:00:00-05:00",
+                "codes": "null",
+                "endDate": "2010-05-25T15:00:00-05:00",
+                "name": "Sample",
+                "roles": "null",
+                "isOnlyEditableByCreatingUser": "true",
+                "ticketCategories": "null",
+                "eventType": {
+                    "eventTypeId": 29166,
+                    "name": "Confirmation Calls"
+                },
+                "notes": "null",
+                "districtFieldValue": "null",
+                "locations": "null",
+                "shifts": "null",
+                "voterRegistrationBatches": "null",
+                "createdDate": "2010-05-25T11:55:00Z",
+                "financialProgram": "null",
+                "shortName": "Sample",
+                "isPubliclyViewable": "null",
+                "isActive": "true",
+                "description": "This is a sample"
+                }
+
+        m.get(self.van.connection.uri + 'events/{}'.format(event_id), json=json)
 
         self.assertEqual(json, self.van.get_event(event_id))
 
     @requests_mock.Mocker()
     def test_create_event(self, m):
 
-        m.post(self.van.connection.uri + "events", json=750000984, status_code=204)
+        m.post(self.van.connection.uri + 'events', json=750000984, status_code=204)
 
         # Test that it doesn't throw and error
-        r = self.van.create_event(
-            "Canvass 01",
-            "Can01",
-            "2016-06-01",
-            "2016-06-02",
-            296199,
-            [259236],
-            publicly_viewable="True",
-            editable=False,
-        )
+        r = self.van.create_event('Canvass 01', 'Can01', '2016-06-01', '2016-06-02',
+                                  296199, [259236], publicly_viewable='True',
+                                  editable=False)
 
         self.assertEqual(r, 750000984)
 
     @requests_mock.Mocker()
     def test_get_event_types(self, m):
 
-        json = [
-            {
-                "eventTypeId": 296199,
-                "name": "Block Party",
-                "canHaveMultipleShifts": False,
-                "canHaveMultipleLocations": False,
-                "canHaveGoals": False,
-                "canHaveRoleMaximums": False,
-                "canHaveRoleMinimums": False,
-                "canBeRepeatable": False,
-                "roles": [
-                    {"roleId": 259236, "name": "Attendee", "isEventLead": False},
-                    {"roleId": 259235, "name": "Supporter", "isEventLead": False},
-                    {"roleId": 259234, "name": "Volunteer", "isEventLead": False},
-                ],
-                "statuses": [
-                    {"statusId": 4, "name": "Invited"},
-                    {"statusId": 18, "name": "Left Msg"},
-                    {"statusId": 14, "name": "Tentative"},
-                    {"statusId": 3, "name": "Declined"},
-                    {"statusId": 11, "name": "Confirmed"},
-                    {"statusId": 23, "name": "Conf Twice"},
-                    {"statusId": 2, "name": "Completed"},
-                    {"statusId": 15, "name": "Walk In"},
-                    {"statusId": 6, "name": "No Show"},
-                    {"statusId": 29, "name": "Texted"},
-                ],
-                "color": "#7F7F7F",
-                "isAtLeastOneLocationRequired": False,
-                "defaultLocation": None,
-                "isSharedWithMasterCommitteeByDefault": False,
-                "isSharedWithChildCommitteesByDefault": False,
-                "isOnlineActionsAvailable": False,
-            }
-        ]
-
-        m.get(self.van.connection.uri + "events/types", json=json)
-
-        expected = [
-            "eventTypeId",
-            "name",
-            "canHaveMultipleShifts",
-            "canHaveMultipleLocations",
-            "canHaveGoals",
-            "canHaveRoleMaximums",
-            "canHaveRoleMinimums",
-            "canBeRepeatable",
-            "roles",
-            "statuses",
-            "color",
-            "isAtLeastOneLocationRequired",
-            "defaultLocation",
-            "isSharedWithMasterCommitteeByDefault",
-            "isSharedWithChildCommitteesByDefault",
-            "isOnlineActionsAvailable",
-        ]
+        json = [{'eventTypeId': 296199,
+                 'name': 'Block Party',
+                 'canHaveMultipleShifts': False,
+                 'canHaveMultipleLocations': False,
+                 'canHaveGoals': False,
+                 'canHaveRoleMaximums': False,
+                 'canHaveRoleMinimums': False,
+                 'canBeRepeatable': False,
+                 'roles': [{'roleId': 259236,
+                            'name': 'Attendee',
+                            'isEventLead': False},
+                           {'roleId': 259235,
+                            'name': 'Supporter',
+                            'isEventLead': False},
+                           {'roleId': 259234,
+                            'name': 'Volunteer',
+                            'isEventLead': False}],
+                 'statuses': [{'statusId': 4, 'name': 'Invited'},
+                              {'statusId': 18, 'name': 'Left Msg'},
+                              {'statusId': 14, 'name': 'Tentative'},
+                              {'statusId': 3, 'name': 'Declined'},
+                              {'statusId': 11, 'name': 'Confirmed'},
+                              {'statusId': 23, 'name': 'Conf Twice'},
+                              {'statusId': 2, 'name': 'Completed'},
+                              {'statusId': 15, 'name': 'Walk In'},
+                              {'statusId': 6, 'name': 'No Show'},
+                              {'statusId': 29, 'name': 'Texted'}],
+                 'color': '#7F7F7F',
+                 'isAtLeastOneLocationRequired': False,
+                 'defaultLocation': None,
+                 'isSharedWithMasterCommitteeByDefault': False,
+                 'isSharedWithChildCommitteesByDefault': False,
+                 'isOnlineActionsAvailable': False}]
+
+        m.get(self.van.connection.uri + 'events/types', json=json)
+
+        expected = ['eventTypeId', 'name', 'canHaveMultipleShifts', 'canHaveMultipleLocations',
+                    'canHaveGoals', 'canHaveRoleMaximums', 'canHaveRoleMinimums', 'canBeRepeatable',
+                    'roles', 'statuses', 'color', 'isAtLeastOneLocationRequired', 'defaultLocation',
+                    'isSharedWithMasterCommitteeByDefault', 'isSharedWithChildCommitteesByDefault',
+                    'isOnlineActionsAvailable']
 
         self.assertTrue(validate_list(expected, self.van.get_event_types()))
diff --git a/test/test_van/test_locations.py b/test/test_van/test_locations.py
index 6b904b643f..66ac64bf5f 100644
--- a/test/test_van/test_locations.py
+++ b/test/test_van/test_locations.py
@@ -1,68 +1,46 @@
 import unittest
 import os
 import requests_mock
-from parsons import VAN
+from parsons.ngpvan.van import VAN
 from test.utils import validate_list
 from requests.exceptions import HTTPError
 
-os.environ["VAN_API_KEY"] = "SOME_KEY"
-
-
-location_json = {
-    "locationId": 34,
-    "name": "Chicagowide",
-    "displayName": "Chicagowide, Chicago, IL ",
-    "address": {
-        "addressId": None,
-        "addressLine1": None,
-        "addressLine2": None,
-        "addressLine3": None,
-        "city": "Chicago",
-        "stateOrProvince": "IL",
-        "zipOrPostalCode": None,
-        "geoLocation": None,
-        "countryCode": "US",
-        "preview": "Chicago, IL ",
-        "type": None,
-        "isPreferred": None,
-        "streetAddress": None,
-        "displayMode": "Standardized",
-    },
-    "id": 34,
-    "notes": None,
-    "codes": None,
-}
-
-expected_loc = [
-    "locationId",
-    "name",
-    "displayName",
-    "id",
-    "notes",
-    "codes",
-    "addressId",
-    "addressLine1",
-    "addressLine2",
-    "addressLine3",
-    "city",
-    "countryCode",
-    "displayMode",
-    "isPreferred",
-    "preview",
-    "stateOrProvince",
-    "streetAddress",
-    "type",
-    "zipOrPostalCode",
-]
+os.environ['VAN_API_KEY'] = 'SOME_KEY'
+
+
+location_json = {'locationId': 34,
+                 'name': 'Chicagowide',
+                 'displayName': 'Chicagowide, Chicago, IL ',
+                 'address': {'addressId': None,
+                             'addressLine1': None,
+                             'addressLine2': None,
+                             'addressLine3': None,
+                             'city': 'Chicago',
+                             'stateOrProvince': 'IL',
+                             'zipOrPostalCode': None,
+                             'geoLocation': None,
+                             'countryCode': 'US',
+                             'preview': 'Chicago, IL ',
+                             'type': None,
+                             'isPreferred': None,
+                             'streetAddress': None,
+                             'displayMode': 'Standardized'},
+                 'id': 34,
+                 'notes': None,
+                 'codes': None}
+
+expected_loc = ['locationId', 'name', 'displayName', 'id', 'notes', 'codes', 'addressId',
+                'addressLine1', 'addressLine2', 'addressLine3', 'city', 'countryCode',
+                'displayMode', 'isPreferred', 'preview', 'stateOrProvince', 'streetAddress',
+                'type', 'zipOrPostalCode']
 
 
 class TestLocations(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(
-            os.environ["VAN_API_KEY"], db="EveryAction", raise_for_status=False
-        )
+        self.van = VAN(os.environ['VAN_API_KEY'], db="EveryAction", raise_for_status=False)
 
     def tearDown(self):
 
@@ -71,8 +49,8 @@ def tearDown(self):
     @requests_mock.Mocker()
     def test_get_locations(self, m):
 
-        json = {"items": [location_json], "nextPageLink": None, "count": 1}
-        m.get(self.van.connection.uri + "locations", json=json)
+        json = {'items': [location_json], 'nextPageLink': None, 'count': 1}
+        m.get(self.van.connection.uri + 'locations', json=json)
 
         self.assertTrue(validate_list(expected_loc, self.van.get_locations()))
 
@@ -80,18 +58,18 @@ def test_get_locations(self, m):
     def test_get_location(self, m):
 
         # Valid location id
-        m.get(self.van.connection.uri + "locations/34", json=location_json)
+        m.get(self.van.connection.uri + 'locations/34', json=location_json)
         self.assertEqual(location_json, self.van.get_location(34))
 
     @requests_mock.Mocker()
    def test_delete_location(self, m):
 
         # Test good location delete
-        m.delete(self.van.connection.uri + "locations/1", status_code=200)
+        m.delete(self.van.connection.uri + 'locations/1', status_code=200)
         self.van.delete_location(1)
 
         # Test invalid location delete
-        m.delete(self.van.connection.uri + "locations/2", status_code=404)
+        m.delete(self.van.connection.uri + 'locations/2', status_code=404)
         self.assertRaises(HTTPError, self.van.delete_location, 2)
 
     @requests_mock.Mocker()
@@ -99,13 +77,7 @@ def test_create_location(self, m):
 
         loc_id = 32
 
-        m.post(
-            self.van.connection.uri + "locations/findOrCreate",
-            json=loc_id,
-            status_code=204,
-        )
+        m.post(self.van.connection.uri + 'locations/findOrCreate', json=loc_id, status_code=204)
 
-        self.assertEqual(
-            self.van.create_location(name="Chicagowide", city="Chicago", state="IL"),
-            loc_id,
-        )
+        self.assertEqual(self.van.create_location(
+            name='Chicagowide', city='Chicago', state='IL'), loc_id)
diff --git a/test/test_van/test_ngpvan.py b/test/test_van/test_ngpvan.py
index ab4c7fc8e7..10e08df0ba 100644
--- a/test/test_van/test_ngpvan.py
+++ b/test/test_van/test_ngpvan.py
@@ -1,15 +1,17 @@
 import os
 import unittest
 import requests_mock
-from parsons import VAN, Table
+from parsons.ngpvan.van import VAN
+from parsons.etl.table import Table
 from test.utils import validate_list, assert_matching_tables
 from requests.exceptions import HTTPError
 
 
 class TestNGPVAN(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
+        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False)
 
     def tearDown(self):
 
@@ -18,22 +20,20 @@ def tearDown(self):
     @requests_mock.Mocker()
     def test_get_canvass_responses_contact_types(self, m):
 
-        json = {"name": "Auto Dial", "contactTypeId": 19, "channelTypeName": "Phone"}
+        json = {"name": "Auto Dial",
+                "contactTypeId": 19,
+                "channelTypeName": "Phone"}
 
-        m.get(self.van.connection.uri + "canvassResponses/contactTypes", json=json)
+        m.get(self.van.connection.uri + 'canvassResponses/contactTypes', json=json)
 
-        assert_matching_tables(
-            Table(json), self.van.get_canvass_responses_contact_types()
-        )
+        assert_matching_tables(Table(json), self.van.get_canvass_responses_contact_types())
 
     @requests_mock.Mocker()
     def test_get_canvass_responses_input_types(self, m):
 
         json = {"inputTypeId": 11, "name": "API"}
-        m.get(self.van.connection.uri + "canvassResponses/inputTypes", json=json)
-        assert_matching_tables(
-            Table(json), self.van.get_canvass_responses_input_types()
-        )
+        m.get(self.van.connection.uri + 'canvassResponses/inputTypes', json=json)
+        assert_matching_tables(Table(json), self.van.get_canvass_responses_input_types())
 
     @requests_mock.Mocker()
     def test_get_canvass_responses_result_codes(self, m):
@@ -42,79 +42,66 @@ def test_get_canvass_responses_result_codes(self, m):
             "shortName": "BZ",
             "resultCodeId": 18,
             "name": "Busy",
-            "mediumName": "Busy",
+            "mediumName": "Busy"
         }
 
-        m.get(self.van.connection.uri + "canvassResponses/resultCodes", json=json)
-        assert_matching_tables(
-            Table(json), self.van.get_canvass_responses_result_codes()
-        )
+        m.get(self.van.connection.uri + 'canvassResponses/resultCodes', json=json)
+        assert_matching_tables(Table(json), self.van.get_canvass_responses_result_codes())
 
     @requests_mock.Mocker()
     def test_get_survey_questions(self, m):
 
-        json = {
-            "count": 67,
-            "items": [
-                {
-                    "status": "Active",
-                    "responses": [
-                        {
-                            "shortName": "1",
-                            "surveyResponseId": 1288926,
-                            "name": "1-Strong Walz",
-                            "mediumName": "1",
-                        },
-                        {
-                            "shortName": "2",
-                            "surveyResponseId": 1288928,
-                            "name": "2-Lean Walz",
-                            "mediumName": "2",
-                        },
-                    ],
-                    "scriptQuestion": "Who do you support for Governor?",
-                    "name": "MN Governor Gen",
+        json = {u'count': 67, u'items': [{
+            "status": "Active",
+            "responses": [
+                {"shortName": "1",
+                 "surveyResponseId": 1288926,
+                 "name": "1-Strong Walz",
+                 "mediumName": "1"},
+                {"shortName": "2",
+                 "surveyResponseId": 1288928,
+                 "name": "2-Lean Walz",
+                 "mediumName": "2"}],
+            "scriptQuestion": "Who do you support for Governor?",
+            "name": "MN Governor Gen",
             "surveyQuestionId": 311838,
             "mediumName": "MNGovG",
             "shortName": "MGG",
             "type": "Candidate",
-            "cycle": 2018,
-                }
-            ],
-            "nextPageLink": None,
-        }
+            "cycle": 2018
+        }],
+            u'nextPageLink': None}
 
-        m.get(self.van.connection.uri + "surveyQuestions", json=json)
+        m.get(self.van.connection.uri + 'surveyQuestions', json=json)
 
-        expected = [
-            "status",
-            "responses",
-            "scriptQuestion",
-            "name",
-            "surveyQuestionId",
-            "mediumName",
-            "shortName",
-            "type",
-            "cycle",
-        ]
+        expected = ['status', 'responses', 'scriptQuestion', 'name',
+                    'surveyQuestionId', 'mediumName', 'shortName',
+                    'type', 'cycle']
 
         self.assertTrue(validate_list(expected, self.van.get_survey_questions()))
 
     @requests_mock.Mocker()
     def test_get_supporter_groups(self, m):
 
-        json = {
-            "items": [
-                {"id": 12, "name": "tmc", "description": "A fun group."},
-                {"id": 13, "name": "tmc", "description": "A fun group."},
-            ],
+        json = {"items": [
+            {
+                "id": 12,
+                "name": "tmc",
+                "description": "A fun group."
+            },
+            {
+                "id": 13,
+                "name": "tmc",
+                "description": "A fun group."
+            },
+        ],
             "nextPageLink": None,
-            "count": 3,
+            "count": 3
         }
 
-        m.get(self.van.connection.uri + "supporterGroups", json=json)
+        m.get(self.van.connection.uri + 'supporterGroups', json=json)
 
-        ["id", "name", "description"]
+        ['id', 'name', 'description']
 
         self.van.get_supporter_groups()
 
@@ -122,7 +109,7 @@ def test_get_supporter_groups(self, m):
     def test_get_supporter_group(self, m):
 
         json = {"id": 12, "name": "tmc", "description": "A fun group."}
-        m.get(self.van.connection.uri + "supporterGroups/12", json=json)
+        m.get(self.van.connection.uri + 'supporterGroups/12', json=json)
 
         # Test that columns are expected columns
         self.assertEqual(self.van.get_supporter_group(12), json)
@@ -132,18 +119,16 @@ def test_delete_supporter_group(self, m):
 
         # Test good input
         good_supporter_group_id = 5
-        good_ep = f"supporterGroups/{good_supporter_group_id}"
+        good_ep = f'supporterGroups/{good_supporter_group_id}'
         m.delete(self.van.connection.uri + good_ep, status_code=204)
         self.van.delete_supporter_group(good_supporter_group_id)
 
         # Test bad input raises
         bad_supporter_group_id = 999
         # bad_vanid = 99999
-        bad_ep = f"supporterGroups/{bad_supporter_group_id}"
+        bad_ep = f'supporterGroups/{bad_supporter_group_id}'
         m.delete(self.van.connection.uri + bad_ep, status_code=404)
-        self.assertRaises(
-            HTTPError, self.van.delete_supporter_group, bad_supporter_group_id
-        )
+        self.assertRaises(HTTPError, self.van.delete_supporter_group, bad_supporter_group_id)
 
     @requests_mock.Mocker()
     def test_add_person_supporter_group(self, m):
@@ -151,21 +136,17 @@ def test_add_person_supporter_group(self, m):
 
         # Test good input
         good_supporter_group_id = 5
         good_vanid = 12345
-        good_uri = f"supporterGroups/{good_vanid}/people/{good_supporter_group_id}"
+        good_uri = f'supporterGroups/{good_vanid}/people/{good_supporter_group_id}'
         m.put(self.van.connection.uri + good_uri, status_code=204)
         self.van.add_person_supporter_group(good_vanid, good_supporter_group_id)
 
         # Test bad input
         bad_supporter_group_id = 999
         bad_vanid = 99999
-        bad_uri = f"supporterGroups/{bad_vanid}/people/{bad_supporter_group_id}"
+        bad_uri = f'supporterGroups/{bad_vanid}/people/{bad_supporter_group_id}'
         m.put(self.van.connection.uri + bad_uri, status_code=404)
         self.assertRaises(
-            HTTPError,
-            self.van.add_person_supporter_group,
-            bad_vanid,
-            bad_supporter_group_id,
-        )
+            HTTPError, self.van.add_person_supporter_group, bad_vanid, bad_supporter_group_id)
 
     @requests_mock.Mocker()
     def test_delete_person_supporter_group(self, m):
@@ -173,23 +154,19 @@ def test_delete_person_supporter_group(self, m):
 
         # Test good input
         good_supporter_group_id = 5
         good_vanid = 12345
-        good_ep = f"supporterGroups/{good_vanid}/people/{good_supporter_group_id}"
+        good_ep = f'supporterGroups/{good_vanid}/people/{good_supporter_group_id}'
         m.delete(self.van.connection.uri + good_ep, status_code=204)
         self.van.delete_person_supporter_group(good_vanid, good_supporter_group_id)
 
         # Test bad input raises
         bad_supporter_group_id = 999
         bad_vanid = 99999
-        bad_ep = f"supporterGroups/{bad_vanid}/people/{bad_supporter_group_id}"
+        bad_ep = f'supporterGroups/{bad_vanid}/people/{bad_supporter_group_id}'
         m.delete(self.van.connection.uri + bad_ep, status_code=404)
         self.assertRaises(
-            HTTPError,
-            self.van.delete_person_supporter_group,
-            bad_vanid,
-            bad_supporter_group_id,
-        )
+            HTTPError, self.van.delete_person_supporter_group, bad_vanid, bad_supporter_group_id)
 
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     unittest.main()
diff --git a/test/test_van/test_people.py b/test/test_van/test_people.py
index eebcf77205..6c30e9c30d 100644
--- a/test/test_van/test_people.py
+++ b/test/test_van/test_people.py
@@ -1,213 +1,142 @@
 import unittest
 import os
 import requests_mock
-from parsons import VAN
+from parsons.ngpvan.van import VAN
 from requests.exceptions import HTTPError
-from test.test_van.responses_people import (
-    find_people_response,
-    get_person_response,
-    merge_contacts_response,
-    delete_person_response,
-)
+from test.test_van.responses_people import find_people_response, get_person_response
 
-os.environ["VAN_API_KEY"] = "SOME_KEY"
+os.environ['VAN_API_KEY'] = 'SOME_KEY'
 
 
 class TestNGPVAN(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
+
+        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False)
 
     @requests_mock.Mocker()
     def test_find_person(self, m):
 
-        m.post(
-            self.van.connection.uri + "people/find",
-            json=find_people_response,
-            status_code=200,
-        )
-        person = self.van.find_person(
-            first_name="Bob", last_name="Smith", phone=4142020792
-        )
+        m.post(self.van.connection.uri + 'people/find', json=find_people_response, status_code=200)
+
+        person = self.van.find_person(first_name='Bob', last_name='Smith', phone=4142020792)
 
         self.assertEqual(person, find_people_response)
 
     @requests_mock.Mocker()
     def test_find_person_json(self, m):
+
         json = {
             "firstName": "Bob",
             "lastName": "Smith",
-            "phones": [{"phoneNumber": 4142020792}],
+            "phones": [{
+                "phoneNumber": 4142020792
+            }]
         }
 
-        m.post(
-            self.van.connection.uri + "people/find",
-            json=find_people_response,
-            status_code=200,
-        )
+        m.post(self.van.connection.uri + 'people/find', json=find_people_response, status_code=200)
 
         person = self.van.find_person_json(match_json=json)
 
         self.assertEqual(person, find_people_response)
 
     def test_upsert_person(self):
+
         pass
 
     def test_upsert_person_json(self):
+
         pass
 
     def test_update_person(self):
+
         pass
 
     def test_update_person_json(self):
+
         pass
 
     def test_people_search(self):
+
         # Already tested as part of upsert and find person methods
         pass
 
     def test_valid_search(self):
+
         # Fails with FN / LN Only
-        self.assertRaises(
-            ValueError,
-            self.van._valid_search,
-            "Barack",
-            "Obama",
-            None,
-            None,
-            None,
-            None,
-            None,
-        )
+        self.assertRaises(ValueError, self.van._valid_search, 'Barack',
+                          'Obama', None, None, None, None, None)
 
         # Fails with only Zip
-        self.assertRaises(
-            ValueError,
-            self.van._valid_search,
-            "Barack",
-            "Obama",
-            None,
-            None,
-            None,
-            None,
-            60622,
-        )
+        self.assertRaises(ValueError, self.van._valid_search, 'Barack',
                          'Obama', None, None, None, None, 60622)
 
         # Fails with no street number
-        self.assertRaises(
-            ValueError,
-            self.van._valid_search,
-            "Barack",
-            "Obama",
-            None,
-            None,
-            None,
-            "Pennsylvania Ave",
-            None,
-        )
+        self.assertRaises(ValueError, self.van._valid_search, 'Barack',
+                          'Obama', None, None, None, 'Pennsylvania Ave', None)
 
         # Successful with FN/LN/Email
-        self.van._valid_search(
-            "Barack", "Obama", "barack@email.com", None, None, None, None
-        )
+        self.van._valid_search('Barack', 'Obama', 'barack@email.com', None, None, None,
+                               None)
 
         # Successful with FN/LN/DOB/ZIP
-        self.van._valid_search(
-            "Barack", "Obama", "barack@email.com", None, "2000-01-01", None, 20009
-        )
+        self.van._valid_search('Barack', 'Obama', 'barack@email.com', None, '2000-01-01',
                               None, 20009)
 
         # Successful with FN/LN/Phone
-        self.van._valid_search("Barack", "Obama", None, 2024291000, None, None, None)
+        self.van._valid_search('Barack', 'Obama', None, 2024291000, None, None,
+                               None)
 
     @requests_mock.Mocker()
     def test_get_person(self, m):
+
         json = get_person_response
 
         # Test works with external ID
-        m.get(self.van.connection.uri + "people/DWID:15406767", json=json)
-        person = self.van.get_person("15406767", id_type="DWID")
+        m.get(self.van.connection.uri + 'people/DWID:15406767', json=json)
+        person = self.van.get_person('15406767', id_type='DWID')
         self.assertEqual(get_person_response, person)
 
         # Test works with vanid
-        m.get(self.van.connection.uri + "people/19722445", json=json)
-        person = self.van.get_person("19722445")
+        m.get(self.van.connection.uri + 'people/19722445', json=json)
+        person = self.van.get_person('19722445')
         self.assertEqual(get_person_response, person)
 
-    @requests_mock.Mocker()
-    def test_delete_person(self, m):
-        json = delete_person_response
-        # Test works with vanid
-        m.delete(self.van.connection.uri + "people/19722445", json=json)
-        response = self.van.delete_person("19722445")
-        self.assertEqual(delete_person_response, response)
-
     @requests_mock.Mocker()
     def test_apply_canvass_result(self, m):
+
         # Test a valid attempt
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=204
-        )
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=204)
         self.van.apply_canvass_result(2335282, 18)
 
         # Test a bad result code
-        json = {
-            "errors": [
-                {
-                    "code": "INVALID_PARAMETER",
-                    "text": "'resultCodeId' must be a valid result code in the current context.",
-                    "properties": ["resultCodeId"],
-                }
-            ]
-        }
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses",
-            json=json,
-            status_code=400,
-        )
+        json = {'errors':
                [{'code': 'INVALID_PARAMETER',
+                  'text': "'resultCodeId' must be a valid result code in the current context.",
+                  'properties': ['resultCodeId']}
+                 ]}
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses',
+               json=json, status_code=400)
         self.assertRaises(HTTPError, self.van.apply_canvass_result, 2335282, 0)
 
         # Test a bad vanid
-        json = {
-            "errors": [
-                {
-                    "code": "INTERNAL_SERVER_ERROR",
-                    "text": "An unknown error occurred",
-                    "referenceCode": "88A111-E2FF8",
-                }
-            ]
-        }
-        m.post(
-            self.van.connection.uri + "people/0/canvassResponses",
-            json=json,
-            status_code=400,
-        )
+        json = {'errors':
+                [{'code': 'INTERNAL_SERVER_ERROR',
+                  'text': 'An unknown error occurred',
+                  'referenceCode': '88A111-E2FF8'}
+                 ]}
+        m.post(self.van.connection.uri + 'people/0/canvassResponses', json=json, status_code=400)
         self.assertRaises(HTTPError, self.van.apply_canvass_result, 0, 18)
 
         # Test a good dwid
-        m.post(
-            self.van.connection.uri + "people/DWID:2335282/canvassResponses",
-            status_code=204,
-        )
-        self.van.apply_canvass_result(2335282, 18, id_type="DWID")
-
-        # test canvassing via phone or sms without providing phone number
-        self.assertRaises(
-            Exception, self.van.apply_canvass_result, 2335282, 18, contact_type_id=37
-        )
-
-        # test canvassing via phone or sms with providing phone number
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=204
-        )
-        self.van.apply_canvass_result(
-            2335282, 18, contact_type_id=37, phone="(516)-555-2342"
-        )
+        m.post(self.van.connection.uri + 'people/DWID:2335282/canvassResponses', status_code=204)
+        self.van.apply_canvass_result(2335282, 18, id_type='DWID')
 
     @requests_mock.Mocker()
     def test_apply_survey_question(self, m):
+
         # Test valid survey question
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=204
-        )
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=204)
         self.van.apply_survey_response(2335282, 351006, 1443891)
 
         # Test bad survey response id
@@ -219,12 +148,8 @@ def test_apply_survey_question(self, m):
         #     'properties': ['responses[0].surveyResponseId']
         #     }]
         # }
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=400
-        )
-        self.assertRaises(
-            HTTPError, self.van.apply_survey_response, 2335282, 0, 1443891
-        )
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=400)
+        self.assertRaises(HTTPError, self.van.apply_survey_response, 2335282, 0, 1443891)
 
         # Test bad survey question id
         # json = {
@@ -235,56 +160,44 @@ def test_apply_survey_question(self, m):
         #     'properties': ['responses[0].surveyQuestionId']
         #     }]
         # }
-        m.post(
-            self.van.connection.uri + "people/2335282/canvassResponses", status_code=400
-        )
+        m.post(self.van.connection.uri + 'people/2335282/canvassResponses', status_code=400)
         self.assertRaises(HTTPError, self.van.apply_survey_response, 2335282, 351006, 0)
 
     def test_toggle_volunteer_action(self):
+
         pass
 
     def test_apply_response(self):
+
         pass
 
     @requests_mock.Mocker()
     def test_create_relationship(self, m):
+
         relationship_id = 12
         bad_vanid_1 = 99999
         good_vanid_1 = 12345
         vanid_2 = 54321
 
         # Bad request
-        m.post(
-            self.van.connection.uri + "people/{}/relationships".format(bad_vanid_1),
-            status_code=404,
-        )
+        m.post(self.van.connection.uri + "people/{}/relationships".format(bad_vanid_1),
+               status_code=404)
 
         # Good request
-        m.post(
-            self.van.connection.uri + "people/{}/relationships".format(good_vanid_1),
-            status_code=204,
-        )
+        m.post(self.van.connection.uri + "people/{}/relationships".format(good_vanid_1),
+               status_code=204)
 
         # Test bad input
         self.assertRaises(
-            HTTPError,
-            self.van.create_relationship,
-            bad_vanid_1,
-            vanid_2,
-            relationship_id,
-        )
+            HTTPError, self.van.create_relationship, bad_vanid_1, vanid_2, relationship_id)
         self.assertRaises(
-            HTTPError,
-            self.van.create_relationship,
-            bad_vanid_1,
-            vanid_2,
-            relationship_id,
-        )
+            HTTPError, self.van.create_relationship, bad_vanid_1, vanid_2, relationship_id)
 
         self.van.create_relationship(good_vanid_1, vanid_2, relationship_id)
 
     @requests_mock.Mocker()
     def test_apply_person_code(self, m):
+
         vanid = 999
         code_id = 888
 
@@ -295,17 +208,3 @@ def test_apply_person_code(self, m):
         # Test bad request
         m.post(self.van.connection.uri + f"people/{vanid}/codes", status_code=404)
         self.assertRaises(HTTPError, self.van.apply_person_code, vanid, code_id)
-
-    @requests_mock.Mocker()
-    def test_merge_contacts(self, m):
-        source_vanid = 12345
-
-        m.put(
-            self.van.connection.uri + f"people/{source_vanid}/mergeInto",
-            json=merge_contacts_response,
-            status_code=200,
-        )
-
-        person = self.van.merge_contacts(source_vanid=source_vanid, primary_vanid=56789)
-
-        self.assertEqual(person, merge_contacts_response)
diff --git a/test/test_van/test_printed_lists.py b/test/test_van/test_printed_lists.py
deleted file mode 100644
index ba73929d15..0000000000
--- a/test/test_van/test_printed_lists.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import unittest
-import os
-import requests_mock
-from parsons import VAN
-from test.test_van.responses_printed_lists import list_json, single_list_json
-
-
-class TestSavedLists(unittest.TestCase):
-    def setUp(self):
-
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
-
-    @requests_mock.Mocker()
-    def test_get_printed_lists(self, m):
-
-        m.get(self.van.connection.uri + "printedLists", json=list_json)
-
-        result = self.van.get_printed_lists(folder_name="Covington Canvass Turfs")
-
-        self.assertEqual(result.num_rows, 14)
-
-    @requests_mock.Mocker()
-    def test_get_printed_list(self, m):
-
-        m.get(self.van.connection.uri + "printedLists/43-0000", json=single_list_json)
-
-        result = self.van.get_printed_list(printed_list_number="43-0000")
-
-        self.assertEqual(result["number"], "43-0000")
diff --git a/test/test_van/test_saved_lists.py b/test/test_van/test_saved_lists.py
index 740bb59971..111b9092ea 100644
--- a/test/test_van/test_saved_lists.py
+++ b/test/test_van/test_saved_lists.py
@@ -2,15 +2,17 @@
 import os
 import requests_mock
 import unittest.mock as mock
-from parsons import VAN, Table
+from parsons.ngpvan.van import VAN
+from parsons.etl.table import Table
 from test.utils import validate_list
 from parsons.utilities import cloud_storage
 
 
 class TestSavedLists(unittest.TestCase):
+
     def setUp(self):
-        self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False)
+        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False)
 
     def tearDown(self):
 
@@ -19,23 +21,18 @@ def tearDown(self):
     @requests_mock.Mocker()
     def test_get_saved_lists(self, m):
 
-        json = {
-            "count": 1,
-            "items": [
-                {
-                    "savedListId": 517612,
-                    "listCount": 974656,
-                    "name": "LikelyParents(16andunder)_DWID_S... - MN",
-                    "doorCount": 520709,
-                    "description": "null",
-                }
-            ],
-            "nextPageLink": None,
-        }
+        json = {'count': 1, 'items': [
+            {"savedListId": 517612,
+             "listCount": 974656,
+             "name": "LikelyParents(16andunder)_DWID_S... - MN",
+             "doorCount": 520709,
+             "description": "null"
+             }
+        ], 'nextPageLink': None}
 
-        m.get(self.van.connection.uri + "savedLists", json=json)
+        m.get(self.van.connection.uri + 'savedLists', json=json)
 
-        expected = ["savedListId", "listCount", "name", "doorCount", "description"]
+        expected = ['savedListId', 'listCount', 'name', 'doorCount', 'description']
 
         self.assertTrue(validate_list(expected, self.van.get_saved_lists()))
 
@@ -44,15 +41,14 @@ def test_get_saved_list(self, m):
 
         saved_list_id = 517612
 
-        json = {
-            "savedListId": 517612,
-            "listCount": 974656,
-            "name": "LikelyParents(16andunder)_DWID_S... - MN",
-            "doorCount": 520709,
-            "description": "null",
-        }
+        json = {"savedListId": 517612,
+                "listCount": 974656,
+                "name": "LikelyParents(16andunder)_DWID_S... - MN",
+                "doorCount": 520709,
+                "description": "null"
+                }
 
-        m.get(self.van.connection.uri + f"savedLists/{saved_list_id}", json=json)
+        m.get(self.van.connection.uri + f'savedLists/{saved_list_id}', json=json)
 
         # expected = ['savedListId', 'listCount', 'name', 'doorCount', 'description']
 
@@ -61,56 +57,53 @@ def test_get_saved_list(self, m):
     def test_upload_saved_list(self):
 
         cloud_storage.post_file = mock.MagicMock()
-        cloud_storage.post_file.return_value = "https://box.com/my_file.zip"
+        cloud_storage.post_file.return_value = 'https://box.com/my_file.zip'
         self.van.connection._soap_client = mock.MagicMock()
         self.van.get_folders = mock.MagicMock()
-        self.van.get_folders.return_value = [{"folderId": 1}]
+        self.van.get_folders.return_value = [{'folderId': 1}]
 
-        tbl = Table([["VANID"], ["1"], ["2"], ["3"]])
+        tbl = Table([['VANID'], ['1'], ['2'], ['3']])
         self.van.upload_saved_list(
-            tbl, "GOTV List", 1, replace=True, url_type="S3", bucket="tmc-scratch"
-        )
+            tbl, 'GOTV List', 1, replace=True, url_type='S3', bucket='tmc-scratch')
         assert self.van.connection._soap_client.service.CreateAndStoreSavedList.called
 
     @requests_mock.Mocker()
     def test_upload_saved_list_rest(self):
 
         cloud_storage.post_file = mock.MagicMock()
-        cloud_storage.post_file.return_value = "https://box.com/my_file.zip"
+        cloud_storage.post_file.return_value = 'https://box.com/my_file.zip'
         self.van.get_folders = mock.MagicMock()
-        self.van.get_folders.return_value = [{"folderId": 1}]
+        self.van.get_folders.return_value = [{'folderId': 1}]
 
-        tbl = Table([["VANID"], ["1"], ["2"], ["3"]])
+        tbl = Table([['VANID'], ['1'], ['2'], ['3']])
         response = self.van.upload_saved_list_rest(
-            tbl=tbl,
-            url_type="S3",
-            folder_id=1,
-            list_name="GOTV List",
-            description="parsons test list",
+            tbl=tbl, url_type="S3",
+            folder_id=1, list_name="GOTV List", description="parsons test list",
             callback_url="https://webhook.site/69ab58c3-a3a7-4ed8-828c-1ea850cb4160",
-            columns=["VANID"],
-            id_column="VANID",
+            columns=["VANID"], id_column="VANID",
             bucket="tmc-scratch",
-            overwrite=517612,
-        )
+            overwrite=517612
+        )
         self.assertIn("jobId", response)
 
     @requests_mock.Mocker()
     def test_get_folders(self, m):
 
-        json = {
-            "count": 2,
-            "items": [
-                {"folderId": 5046, "name": "#2018_MN_active_universe"},
-                {"folderId": 2168, "name": "API Generated Lists"},
-            ],
-            "nextPageLink": None,
-        }
+        json = {u'count': 2,
+                u'items': [
+                    {
+                        u'folderId': 5046,
+                        u'name': u'#2018_MN_active_universe'
+                    },
+                    {u'folderId': 2168,
+                     u'name': u'API Generated Lists'
+                     }
+                ], u'nextPageLink': None}
 
-        m.get(self.van.connection.uri + "folders", json=json)
+        m.get(self.van.connection.uri + 'folders', json=json)
 
-        expected = ["folderId", "name"]
+        expected = ['folderId', 'name']
 
         self.assertTrue(validate_list(expected, self.van.get_folders()))
 
@@ -121,22 +114,20 @@ def test_get_folder(self, m):
 
         json = {"folderId": 5046, "name": "#2018_MN_active_universe"}
 
-        m.get(self.van.connection.uri + f"folders/{folder_id}", json=json)
+        m.get(self.van.connection.uri + f'folders/{folder_id}', json=json)
 
         self.assertEqual(json, self.van.get_folder(folder_id))
 
     @requests_mock.Mocker()
     def test_export_job_types(self, m):
 
-        json = {
-            "count": 1,
-            "items": [{"exportJobTypeId": 4, "name": "SavedListExport"}],
-            "nextPageLink": None,
-        }
+        json = {u'count': 1, u'items':
                [{u'exportJobTypeId': 4, u'name': u'SavedListExport'}],
+                u'nextPageLink': None}
 
-        m.get(self.van.connection.uri + "exportJobTypes", json=json)
+        m.get(self.van.connection.uri + 'exportJobTypes', json=json)
 
-        expected = ["exportJobTypeId", "name"]
+        expected = ['exportJobTypeId', 'name']
 
         self.assertTrue(validate_list(expected, self.van.get_export_job_types()))
 
@@ -145,25 +136,23 @@ def test_export_job_create(self, m):
 
         saved_list_id = 517612
 
-        json = {
-            "status": "Completed",
-            "errorCode": "null",
-            "exportJobGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
-            "activistCodes": "null",
-            "canvassFileRequestId": 448,
-            "dateExpired": "2018-09-08T16:04:00Z",
-            "surveyQuestions": "null",
-            "webhookUrl": "https://www.nothing.com/",
-            "downloadUrl": "https://ngpvan.blob.core.windows.net/canvass-files-savedlistexport/bf4d1297-1c77-3fb2-03bd-f0acda122d37_2018-09-08T13:03:27.7191831-04:00.csv",  # noqa: E501
-            "savedListId": 517612,
-            "districtFields": "null",
-            "canvassFileRequestGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
-            "customFields": "null",
-            "type": 4,
-            "exportJobId": 448,
-        }
-
-        m.post(self.van.connection.uri + "exportJobs", json=json, status_code=201)
+        json = {"status": "Completed",
+                "errorCode": "null",
+                "exportJobGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
+                "activistCodes": "null",
+                "canvassFileRequestId": 448,
+                "dateExpired": "2018-09-08T16:04:00Z",
+                "surveyQuestions": "null",
+                "webhookUrl": "https://www.nothing.com/",
+                "downloadUrl": "https://ngpvan.blob.core.windows.net/canvass-files-savedlistexport/bf4d1297-1c77-3fb2-03bd-f0acda122d37_2018-09-08T13:03:27.7191831-04:00.csv",  # noqa: E501
+                "savedListId": 517612,
+                "districtFields": "null",
+                "canvassFileRequestGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
+                "customFields": "null",
+                "type": 4,
+                "exportJobId": 448}
+
+        m.post(self.van.connection.uri + 'exportJobs', json=json, status_code=201)
 
         # expected = [
         #     'status',
@@ -189,23 +178,21 @@ def test_get_export_job(self, m):
 
         export_job_id = 448
 
-        json = {
-            "status": "Completed",
-            "errorCode": "null",
-            "exportJobGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
-            "activistCodes": "null",
-            "canvassFileRequestId": 448,
-            "dateExpired": "2018-09-08T16:04:00Z",
-            "surveyQuestions": "null",
-            "webhookUrl": "https://www.nothing.com/",
-            "downloadUrl": "https://ngpvan.blob.core.windows.net/canvass-files-savedlistexport/bf4d1297-1c77-3fb2-03bd-f0acda122d37_2018-09-08T13:03:27.7191831-04:00.csv",  # noqa: E501
-            "savedListId": 517612,
-            "districtFields": "null",
-            "canvassFileRequestGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
-            "customFields": "null",
-            "type": 4,
-            "exportJobId": 448,
-        }
+        json = {"status": "Completed",
+                "errorCode": "null",
+                "exportJobGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
+                "activistCodes": "null",
+                "canvassFileRequestId": 448,
+                "dateExpired": "2018-09-08T16:04:00Z",
+                "surveyQuestions": "null",
+                "webhookUrl": "https://www.nothing.com/",
+                "downloadUrl": "https://ngpvan.blob.core.windows.net/canvass-files-savedlistexport/bf4d1297-1c77-3fb2-03bd-f0acda122d37_2018-09-08T13:03:27.7191831-04:00.csv",  # noqa: E501
+                "savedListId": 517612,
+                "districtFields": "null",
+                "canvassFileRequestGuid": "bf4d1297-1c77-3fb2-03bd-f0acda122d37",
+                "customFields": "null",
+                "type": 4,
+                "exportJobId": 448}
 
         # expected = [
         #     'status',
@@ -224,6 +211,6 @@ def test_get_export_job(self, m):
         #     'type',
         #     'exportJobId']
 
-        m.get(self.van.connection.uri + f"exportJobs/{export_job_id}", json=json)
+        m.get(self.van.connection.uri + f'exportJobs/{export_job_id}', json=json)
 
         self.assertEqual(json, self.van.get_export_job(export_job_id))
diff --git a/test/test_van/test_scores.py b/test/test_van/test_scores.py
index 63761946bc..93d41aa217 100644
--- a/test/test_van/test_scores.py
+++
b/test/test_van/test_scores.py @@ -2,51 +2,39 @@ import os import requests_mock import unittest.mock as mock -from parsons import VAN, Table +from parsons.ngpvan.van import VAN +from parsons.etl.table import Table from test.utils import validate_list from parsons.utilities import cloud_storage -os.environ["VAN_API_KEY"] = "SOME_KEY" +os.environ['VAN_API_KEY'] = 'SOME_KEY' class TestScores(unittest.TestCase): + def setUp(self): - self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False) + self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False) @requests_mock.Mocker() def test_get_scores(self, m): - json = { - "count": 2, - "items": [ - { - "origin": None, - "scoreId": 2716, - "name": "Democratic Party Support", - "maxValue": 100.0, - "minValue": 1.0, - "state": None, - "shortName": "Dem Support", - "description": None, - } - ], - "nextPageLink": None, - } + json = {u'count': 2, u'items': + [{u'origin': None, + u'scoreId': 2716, + u'name': u'Democratic Party Support', + u'maxValue': 100.0, + u'minValue': 1.0, + u'state': None, + u'shortName': u'Dem Support', + u'description': None}], + u'nextPageLink': None} - m.get(self.van.connection.uri + "scores", json=json) + m.get(self.van.connection.uri + 'scores', json=json) - expected = [ - "origin", - "scoreId", - "name", - "maxValue", - "minValue", - "state", - "shortName", - "description", - ] + expected = ['origin', 'scoreId', 'name', 'maxValue', 'minValue', + 'state', 'shortName', 'description'] self.assertTrue(validate_list(expected, self.van.get_scores())) @@ -55,90 +43,64 @@ def test_get_score(self, m): score_id = 2716 - json = { - "origin": None, - "scoreId": 2716, - "name": "Democratic Party Support", - "maxValue": 100.0, - "minValue": 1.0, - "state": None, - "shortName": "Dem Support", - "description": None, - } + json = {u'origin': None, + u'scoreId': 2716, + u'name': u'Democratic Party Support', + u'maxValue': 100.0, + u'minValue': 1.0, + u'state': None, + u'shortName': u'Dem Support', + u'description': None} - m.get(self.van.connection.uri + "scores/{}".format(score_id), json=json) + m.get(self.van.connection.uri + 'scores/{}'.format(score_id), json=json) self.assertEqual(json, self.van.get_score(score_id)) @requests_mock.Mocker() def test_get_score_updates(self, m): json = { - "items": [ - { - "scoreUpdateId": 58319, - "score": { - "scoreId": 29817, - "name": "TargetSmart Gun Ownership", - "shortName": None, - "description": None, - "minValue": 0.0, - "maxValue": 100.0, - "state": "MT", - "origin": None, - }, - "updateStatistics": { - "totalRows": 856644, - "duplicateRows": 0, - "matchedRows": 856644, - "matchPercent": 100.0, - "increasedBy": 441264, - "decreasedBy": 280588, - "nulledOut": 3649, - "added": 115129, - "outOfRange": 0, - "badValues": 0, - "maxValue": 95.9, - "minValue": 11.2, - "averageValue": 72.3338, - "medianValue": 76.3, - }, - "loadStatus": "Completed", - "dateProcessed": "2019-09-10T02:07:00Z", - } - ], - "nextPageLink": None, - "count": 306, + 'items': [{ + 'scoreUpdateId': 58319, + 'score': { + 'scoreId': 29817, + 'name': 'TargetSmart Gun Ownership', + 'shortName': None, + 'description': None, + 'minValue': 0.0, + 'maxValue': 100.0, + 'state': 'MT', + 'origin': None + }, + 'updateStatistics': { + 'totalRows': 856644, + 'duplicateRows': 0, + 'matchedRows': 856644, + 'matchPercent': 100.0, + 'increasedBy': 441264, + 'decreasedBy': 280588, + 'nulledOut': 3649, + 'added': 115129, + 'outOfRange': 0, + 'badValues': 0, + 'maxValue': 95.9, + 'minValue': 11.2, + 
'averageValue': 72.3338, + 'medianValue': 76.3 + }, + 'loadStatus': 'Completed', + 'dateProcessed': '2019-09-10T02:07:00Z' + }], + 'nextPageLink': None, + 'count': 306 } - m.get(self.van.connection.uri + "scoreUpdates", json=json) - - expected = [ - "scoreUpdateId", - "loadStatus", - "dateProcessed", - "added", - "averageValue", - "badValues", - "decreasedBy", - "duplicateRows", - "increasedBy", - "matchPercent", - "matchedRows", - "maxValue", - "medianValue", - "minValue", - "nulledOut", - "outOfRange", - "totalRows", - "description", - "maxValue", - "minValue", - "name", - "origin", - "scoreId", - "shortName", - "state", - ] + m.get(self.van.connection.uri + 'scoreUpdates', json=json) + + expected = ['scoreUpdateId', 'loadStatus', 'dateProcessed', 'added', 'averageValue', + 'badValues', 'decreasedBy', 'duplicateRows', 'increasedBy', 'matchPercent', + 'matchedRows', 'maxValue', 'medianValue', 'minValue', 'nulledOut', + 'outOfRange', 'totalRows', 'description', 'maxValue', 'minValue', 'name', + 'origin', 'scoreId', 'shortName', 'state'] self.assertTrue(validate_list(expected, self.van.get_score_updates())) @@ -163,23 +125,23 @@ def test_get_score_update(self, m): "minValue": 10.0, "duplicateRows": "null", "averageValue": 20.0, - "decreasedBy": 2, + "decreasedBy": 2 }, "score": { "origin": "null", "scoreId": 2716, "name": "Democratic Party Support", - "maxValue": 100.0, - "minValue": 1.0, - "state": "null", - "shortName": "null", - "description": "null", + "maxValue": 100.0, + "minValue": 1.0, + "state": "null", + "shortName": "null", + "description": "null" }, "dateProcessed": "null", - "scoreUpdateId": 27892, + "scoreUpdateId": 27892 } - m.get(self.van.connection.uri + f"scoreUpdates/{score_update_id}", json=json) + m.get(self.van.connection.uri + f'scoreUpdates/{score_update_id}', json=json) # expected = ['loadStatus', 'updateStatistics', 'score', 'dateProcessed', 'scoreUpdateId'] @@ -190,75 +152,50 @@ def test_update_score_status(self, m): score_update_id = 27892 - m.patch( - self.van.connection.uri + "scoreUpdates/{}".format(score_update_id), - status_code=204, - ) + m.patch(self.van.connection.uri + 'scoreUpdates/{}'.format(score_update_id), + status_code=204) # Test bad input - self.assertRaises( - ValueError, self.van.update_score_status, score_update_id, "not a thing." 
- ) + self.assertRaises(ValueError, self.van.update_score_status, score_update_id, 'not a thing.') # Test good input - self.assertTrue(self.van.update_score_status(score_update_id, "approved")) + self.assertTrue(self.van.update_score_status(score_update_id, 'approved')) @requests_mock.Mocker() def test_upload_scores(self, m): # Mock Cloud Storage cloud_storage.post_file = mock.MagicMock() - cloud_storage.post_file.return_value = "https://box.com/my_file.zip" + cloud_storage.post_file.return_value = 'https://box.com/my_file.zip' # Test uploading a job - tbl = Table([["vanid", "col"], ["1", ".5"]]) - json = {"jobId": 9749} - m.post(self.van.connection.uri + "FileLoadingJobs", json=json, status_code=201) - self.van.upload_scores( - tbl, [{"score_id": 9999, "score_column": "col"}], url_type="S3" - ) + tbl = Table([['vanid', 'col'], ['1', '.5']]) + json = {'jobId': 9749} + m.post(self.van.connection.uri + 'FileLoadingJobs', json=json, status_code=201) + self.van.upload_scores(tbl, [{'score_id': 9999, 'score_column': 'col'}], url_type='S3') @requests_mock.Mocker() def test_create_file_load(self, m): - file_name = "test_scores.csv" - file_url_good = "http://tmc.org/test_scores.zip" + file_name = 'test_scores.csv' + file_url_good = 'http://tmc.org/test_scores.zip' # file_url_bad = 'http://tmc.org/test_scores' - columns = ["vanid", "score"] - id_column = "vanid" - id_type = "VANID" + columns = ['vanid', 'score'] + id_column = 'vanid' + id_type = 'VANID' score_id = 2716 - score_column = "score" - bad_delimiter = "*" + score_column = 'score' + bad_delimiter = '*' - json = {"jobId": 9749} + json = {'jobId': 9749} - m.post(self.van.connection.uri + "FileLoadingJobs", json=json, status_code=201) + m.post(self.van.connection.uri + 'FileLoadingJobs', json=json, status_code=201) # Test bad delimiter - self.assertRaises( - ValueError, - self.van.create_file_load, - file_name, - file_url_good, - columns, - id_column, - id_type, - score_id, - score_column, - delimiter=bad_delimiter, - ) + self.assertRaises(ValueError, self.van.create_file_load, file_name, file_url_good, columns, + id_column, id_type, score_id, score_column, delimiter=bad_delimiter) # Test good request - self.assertEqual( - json["jobId"], - self.van.create_file_load( - file_name, - file_url_good, - columns, - id_column, - id_type, - score_id, - score_column, - ), - ) + self.assertEqual(json['jobId'], self.van.create_file_load(file_name, file_url_good, columns, + id_column, id_type, score_id, + score_column)) diff --git a/test/test_van/test_signups.py b/test/test_van/test_signups.py index 27d9bb8c5b..1d46a261f9 100644 --- a/test/test_van/test_signups.py +++ b/test/test_van/test_signups.py @@ -1,91 +1,71 @@ import unittest import os import requests_mock -from parsons import VAN +from parsons.ngpvan.van import VAN from test.utils import validate_list -signup_status = [ - {"statusId": 5, "name": "Cancelled"}, - {"statusId": 2, "name": "Completed"}, - {"statusId": 11, "name": "Confirmed"}, - {"statusId": 4, "name": "Invited"}, - {"statusId": 18, "name": "Left Msg"}, - {"statusId": 6, "name": "No Show"}, - {"statusId": 1, "name": "Scheduled"}, - {"statusId": 30, "name": "Sched-Web"}, - {"statusId": 15, "name": "Walk In"}, -] +signup_status = [{'statusId': 5, 'name': 'Cancelled'}, + {'statusId': 2, 'name': 'Completed'}, + {'statusId': 11, 'name': 'Confirmed'}, + {'statusId': 4, 'name': 'Invited'}, + {'statusId': 18, 'name': 'Left Msg'}, + {'statusId': 6, 'name': 'No Show'}, + {'statusId': 1, 'name': 'Scheduled'}, + {'statusId': 30, 'name': 
'Sched-Web'}, + {'statusId': 15, 'name': 'Walk In'}] signup = { - "eventSignupId": 14285, - "person": { - "vanId": 100349920, - "firstName": "Helen", - "middleName": None, - "lastName": "Maddix", - "contactOrganizationCommonName": None, - "contactOrganizationOfficialName": None, - "contactModeId": 1, - "email": None, - "phone": None, - }, - "event": {"eventId": 750001004, "name": "Canvass 01", "shortName": "Can01"}, - "shift": {"eventShiftId": 19076, "name": "Default Shift"}, - "role": {"roleId": 263920, "name": "Leader"}, - "status": {"statusId": 11, "name": "Confirmed"}, - "location": { - "locationId": 3, - "name": "First Presbyterian Church", - "displayName": "First Presbyterian Church, 340 5th Ave S Saint Cloud, MN 56301 ", - }, - "startTimeOverride": "2018-12-31T08:00:00-05:00", - "endTimeOverride": "2018-12-31T10:00:00-05:00", - "printedLists": None, - "minivanExports": None, - "supporterGroupId": None, - "isOfflineSignup": True, -} - -signup_expected = [ - "eventSignupId", - "startTimeOverride", - "endTimeOverride", - "printedLists", - "minivanExports", - "supporterGroupId", - "isOfflineSignup", - "contactModeId", - "contactOrganizationCommonName", - "contactOrganizationOfficialName", - "email", - "firstName", - "lastName", - "middleName", - "phone", - "vanId", - "status_name", - "status_statusId", - "event_eventId", - "event_name", - "event_shortName", - "shift_eventShiftId", - "shift_name", - "role_name", - "role_roleId", - "location_displayName", - "location_locationId", - "location_name", -] - -os.environ["VAN_API_KEY"] = "SOME_KEY" + 'eventSignupId': 14285, + 'person': { + 'vanId': 100349920, + 'firstName': 'Helen', + 'middleName': None, + 'lastName': 'Maddix', + 'contactOrganizationCommonName': None, + 'contactOrganizationOfficialName': None, + 'contactModeId': 1, + 'email': None, + 'phone': None}, + 'event': { + 'eventId': 750001004, + 'name': 'Canvass 01', + 'shortName': 'Can01'}, + 'shift': { + 'eventShiftId': 19076, + 'name': 'Default Shift'}, + 'role': { + 'roleId': 263920, + 'name': 'Leader'}, + 'status': { + 'statusId': 11, + 'name': 'Confirmed'}, + 'location': { + 'locationId': 3, + 'name': 'First Presbyterian Church', + 'displayName': 'First Presbyterian Church, 340 5th Ave S Saint Cloud, MN 56301 '}, + 'startTimeOverride': '2018-12-31T08:00:00-05:00', + 'endTimeOverride': '2018-12-31T10:00:00-05:00', + 'printedLists': None, + 'minivanExports': None, + 'supporterGroupId': None, + 'isOfflineSignup': True} + +signup_expected = ['eventSignupId', 'startTimeOverride', 'endTimeOverride', 'printedLists', + 'minivanExports', 'supporterGroupId', 'isOfflineSignup', 'contactModeId', + 'contactOrganizationCommonName', 'contactOrganizationOfficialName', 'email', + 'firstName', 'lastName', 'middleName', 'phone', 'vanId', 'status_name', + 'status_statusId', 'event_eventId', 'event_name', 'event_shortName', + 'shift_eventShiftId', 'shift_name', 'role_name', 'role_roleId', + 'location_displayName', 'location_locationId', 'location_name'] + +os.environ['VAN_API_KEY'] = 'SOME_KEY' class TestSignups(unittest.TestCase): + def setUp(self): - self.van = VAN( - os.environ["VAN_API_KEY"], db="EveryAction", raise_for_status=False - ) + self.van = VAN(os.environ['VAN_API_KEY'], db="EveryAction", raise_for_status=False) def tearDown(self): @@ -94,59 +74,44 @@ def tearDown(self): @requests_mock.Mocker() def test_get_signup_statuses(self, m): - m.get(self.van.connection.uri + "signups/statuses", json=signup_status) + m.get(self.van.connection.uri + 'signups/statuses', json=signup_status) # Test 
events lookup - self.assertTrue( - validate_list( - ["statusId", "name"], self.van.get_signups_statuses(event_id=750000849) - ) - ) + self.assertTrue(validate_list(['statusId', 'name'], + self.van.get_signups_statuses(event_id=750000849))) # Test event type lookup - self.assertTrue( - validate_list( - ["statusId", "name"], - self.van.get_signups_statuses(event_type_id=750000849), - ) - ) + self.assertTrue(validate_list(['statusId', 'name'], + self.van.get_signups_statuses(event_type_id=750000849))) @requests_mock.Mocker() def test_get_signups(self, m): - json = {"items": [signup], "nextPageLink": None, "count": 1} + json = {'items': [signup], 'nextPageLink': None, 'count': 1} - m.get(self.van.connection.uri + "signups", json=json) + m.get(self.van.connection.uri + 'signups', json=json) - self.assertTrue( - validate_list( - signup_expected, self.van.get_event_signups(event_id=750001004) - ) - ) + self.assertTrue(validate_list(signup_expected, + self.van.get_event_signups(event_id=750001004))) - self.assertTrue( - validate_list(signup_expected, self.van.get_person_signups(vanid=750000849)) - ) + self.assertTrue(validate_list(signup_expected, + self.van.get_person_signups(vanid=750000849))) @requests_mock.Mocker() def test_get_signup(self, m): event_signup_id = 14285 - m.get( - self.van.connection.uri + f"signups/{event_signup_id}".format(), json=signup - ) + m.get(self.van.connection.uri + f'signups/{event_signup_id}'.format(), json=signup) self.assertEqual(signup, self.van.get_signup(event_signup_id)) @requests_mock.Mocker() def test_create_signup(self, m): - m.post(self.van.connection.uri + "signups", json=14285, status_code=201) + m.post(self.van.connection.uri + 'signups', json=14285, status_code=201) - self.assertEqual( - self.van.create_signup(100349920, 750001004, 19076, 263920, 11, 3), 14285 - ) + self.assertEqual(self.van.create_signup(100349920, 750001004, 19076, 263920, 11, 3), 14285) @requests_mock.Mocker() def test_update_signup(self, m): @@ -156,9 +121,9 @@ def test_update_signup(self, m): event_signup_id = 14285 # Get object route - m.get(self.van.connection.uri + f"signups/{event_signup_id}", json=signup) + m.get(self.van.connection.uri + f'signups/{event_signup_id}', json=signup) # Update object - m.put(self.van.connection.uri + f"signups/{event_signup_id}", status_code=204) + m.put(self.van.connection.uri + f'signups/{event_signup_id}', status_code=204) self.van.update_signup(event_signup_id, status_id=6) diff --git a/test/test_van/test_targets.py b/test/test_van/test_targets.py index be4f1dc973..07c6f0bb1b 100644 --- a/test/test_van/test_targets.py +++ b/test/test_van/test_targets.py @@ -1,17 +1,25 @@ -import unittest.mock +import unittest import os import requests_mock -from parsons import VAN, Table -import petl +from parsons.ngpvan import VAN +from parsons.etl.table import Table from test.utils import validate_list, assert_matching_tables -os.environ["VAN_API_KEY"] = "SOME_KEY" +os.environ['VAN_API_KEY'] = 'SOME_KEY' class TestTargets(unittest.TestCase): + + mock_data = mock_data = ( + '12827,Volunteer Recruitment Tiers,Tier,109957740\n' + '12827,Volunteer Recruitment Tiers,Tier,109957754') + mock_result = Table([ + ('12827', 'Volunteer Recruitment Tiers', 'Tier', '109957740'), + ('12827', 'Volunteer Recruitment Tiers', 'Tier', '109957754')]) + def setUp(self): - self.van = VAN(os.environ["VAN_API_KEY"], db="MyVoters", raise_for_status=False) + self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False) def tearDown(self): @@ -21,38 +29,23 @@ def 
tearDown(self): def test_get_targets(self, m): # Create response - json = { - "count": 2, - "items": [ - { - "targetId": 12827, - "type": "TEST CODE", - "name": "TEST CODE", - "description": None, - "points": 20, - "areSubgroupsSticky": False, - "status": "Active", - "subgroups": None, - "markedSubgroup": None, - } - ], - "nextPageLink": None, - } - - m.get(self.van.connection.uri + "targets", json=json) + json = {u'count': 2, u'items': + [{u'targetId': 12827, + u'type': u'TEST CODE', + u'name': u'TEST CODE', + u'description': None, + u'points': 20, + u'areSubgroupsSticky': False, + u'status': u'Active', + u'subgroups': None, + u'markedSubgroup': None}], + u'nextPageLink': None} + + m.get(self.van.connection.uri + 'targets', json=json) # Expected Structure - expected = [ - "targetId", - "type", - "name", - "description", - "points", - "areSubgroupsSticky", - "status", - "subgroups", - "markedSubgroup", - ] + expected = ['targetId', 'type', 'name', 'description', + 'points', 'areSubgroupsSticky', 'status', 'subgroups', 'markedSubgroup'] # Assert response is expected structure self.assertTrue(validate_list(expected, self.van.get_targets())) @@ -63,27 +56,22 @@ def test_get_targets(self, m): def test_get_target(self, m): # Create response - json = { - "targetId": 15723, - "name": "Mail_VR_Chase", - "type": "Dynamic", - "description": None, - "points": 15, - "areSubgroupsSticky": False, - "status": "Active", - "subgroups": [ - { - "targetId": 12827, - "fullName": "April_VR_Chase Calls", - "name": "April_Chase_20", - "subgroupId": 46803, - "isAssociatedWithBadges": True, - } - ], - "markedSubgroup": None, - } - - m.get(self.van.connection.uri + "targets/15723", json=json) + json = {u'targetId': 15723, + u'name': u'Mail_VR_Chase', + u'type': u'Dynamic', + u'description': None, + u'points': 15, + u'areSubgroupsSticky': False, + u'status': u'Active', + u'subgroups': + [{u'targetId': 12827, + u'fullName': u'April_VR_Chase Calls', + u'name': u'April_Chase_20', + u'subgroupId': 46803, + u'isAssociatedWithBadges': True}], + u'markedSubgroup': None} + + m.get(self.van.connection.uri + 'targets/15723', json=json) self.assertEqual(json, self.van.get_target(15723)) @@ -93,79 +81,34 @@ def test_create_target_export(self, m): export_job_id = '{"exportJobId": "455961790"}' target_id = 12827 - m.post( - self.van.connection.uri + "targetExportJobs", - json=export_job_id, - status_code=204, - ) + m.post(self.van.connection.uri + 'targetExportJobs', json=export_job_id, status_code=204) # Test that it doesn't throw and error r = self.van.create_target_export(target_id, webhook_url=None) self.assertEqual(r, export_job_id) - @unittest.mock.patch.object(petl, "fromcsv", autospec=True) @requests_mock.Mocker() - def test_get_target_export(self, fromcsv, m): + def test_get_target_export(self, m): export_job_id = 455961790 - json = { + json = [{ "targetId": 12827, "file": { "downloadUrl": ( "https://ngpvan.blob.core.windows.net/" - "target-export-files/TargetExport_455961790.csv" - ), + "target-export-files/TargetExport_455961790.csv"), "dateExpired": "null", - "recordCount": 1016883, - }, + "recordCount": 1016883}, "webhookUrl": "null", "exportJobId": 455961790, - "jobStatus": "Complete", - } - - download_url = "https://ngpvan.blob.core.windows.net/target-export-files/TargetExport_455961790.csv" # noqa: E501 - fromcsv.return_value = petl.fromcolumns( - [ - ["12827", "12827"], - ["Volunteer Recruitment Tiers", "Volunteer Recruitment Tiers"], - ["1111", "1111"], - ["Tier", "Tier"], - ["109957749", "109957754"], - ], - [ 
- "TargetID", - "TargetName", - "TargetSubgroupID", - "TargetSubgroupName", - "VanID", - ], - ) - - m.post( - self.van.connection.uri + "targetExportJobs", - json=export_job_id, - status_code=204, - ) - m.get(self.van.connection.uri + "targetExportJobs/455961790", json=json) - - expected_result = Table( - [ - ( - "TargetID", - "TargetName", - "TargetSubgroupID", - "TargetSubgroupName", - "VanID", - ), - ("12827", "Volunteer Recruitment Tiers", "1111", "Tier", "109957749"), - ("12827", "Volunteer Recruitment Tiers", "1111", "Tier", "109957754"), - ] - ) - - assert_matching_tables( - self.van.get_target_export(export_job_id), expected_result - ) - self.assertEqual( - fromcsv.call_args, unittest.mock.call(download_url, encoding="utf-8-sig") - ) + "jobStatus": "Complete"}] + + download_url = ( + 'https://ngpvan.blob.core.windows.net/target-export-files/TargetExport_455961790.csv') + + m.post(self.van.connection.uri + 'targetExportJobs', json=export_job_id, status_code=204) + m.get(self.van.connection.uri + 'targetExportJobs/455961790', json=json) + m.get(download_url, text=self.mock_data) + assert_matching_tables(self.van.get_target_export(export_job_id), + self.mock_result) diff --git a/test/test_zoom.py b/test/test_zoom.py index c53b7696e0..c34f508033 100644 --- a/test/test_zoom.py +++ b/test/test_zoom.py @@ -1,445 +1,392 @@ import unittest -from test.utils import assert_matching_tables - import requests_mock -from parsons import Table, Zoom +from test.utils import assert_matching_tables -ACCOUNT_ID = "fakeAccountID" -CLIENT_ID = "fakeClientID" -CLIENT_SECRET = "fakeClientSecret" +from parsons.etl.table import Table +from parsons.zoom.zoom import Zoom -ZOOM_URI = "https://api.zoom.us/v2/" -ZOOM_AUTH_CALLBACK = "https://zoom.us/oauth/token" +API_KEY = 'fake_api_key' +API_SECRET = 'fake_api_secret' +ZOOM_URI = 'https://api.zoom.us/v2/' class TestZoom(unittest.TestCase): - @requests_mock.Mocker() - def setUp(self, m): - m.post(ZOOM_AUTH_CALLBACK, json={"access_token": "fakeAccessToken"}) - self.zoom = Zoom(ACCOUNT_ID, CLIENT_ID, CLIENT_SECRET) + + def setUp(self): + self.zoom = Zoom(API_KEY, API_SECRET) @requests_mock.Mocker() def test_get_users(self, m): user_json = { - "page_count": 1, - "page_number": 1, - "page_size": 30, - "total_records": 1, - "users": [ - { - "id": "C5A2nRWwTMm_hXyJb1JXMh", - "first_name": "Bob", - "last_name": "McBob", - "email": "bob@bob.com", - "type": 2, - "pmi": 8374523641, - "timezone": "America/New_York", - "verified": 1, - "dept": "", - "created_at": "2017-10-06T15:22:34Z", - "last_login_time": "2020-05-06T16:50:45Z", - "last_client_version": "", - "language": "", - "phone_number": "", - "status": "active", - } - ], - } + 'page_count': 1, + 'page_number': 1, + 'page_size': 30, + 'total_records': 1, + 'users': [{ + 'id': 'C5A2nRWwTMm_hXyJb1JXMh', + 'first_name': 'Bob', + 'last_name': 'McBob', + 'email': 'bob@bob.com', + 'type': 2, + 'pmi': 8374523641, + 'timezone': 'America/New_York', + 'verified': 1, + 'dept': '', + 'created_at': '2017-10-06T15:22:34Z', + 'last_login_time': '2020-05-06T16:50:45Z', + 'last_client_version': '', + 'language': '', + 'phone_number': '', + 'status': 'active'}]} - tbl = Table( - [ - { - "id": "C5A2nRWwTMm_hXyJb1JXMh", - "first_name": "Bob", - "last_name": "McBob", - "email": "bob@bob.com", - "type": 2, - "pmi": 8374523641, - "timezone": "America/New_York", - "verified": 1, - "dept": "", - "created_at": "2017-10-06T15:22:34Z", - "last_login_time": "2020-05-06T16:50:45Z", - "last_client_version": "", - "language": "", - "phone_number": 
"", - "status": "active", - } - ] - ) + tbl = Table([{'id': 'C5A2nRWwTMm_hXyJb1JXMh', + 'first_name': 'Bob', + 'last_name': 'McBob', + 'email': 'bob@bob.com', + 'type': 2, + 'pmi': 8374523641, + 'timezone': 'America/New_York', + 'verified': 1, + 'dept': '', + 'created_at': '2017-10-06T15:22:34Z', + 'last_login_time': '2020-05-06T16:50:45Z', + 'last_client_version': '', + 'language': '', + 'phone_number': '', + 'status': 'active'}]) - m.post(ZOOM_AUTH_CALLBACK, json={"access_token": "fakeAccessToken"}) - m.get(ZOOM_URI + "users", json=user_json) + m.get(ZOOM_URI + 'users', json=user_json) assert_matching_tables(self.zoom.get_users(), tbl) @requests_mock.Mocker() def test_get_meeting_participants(self, m): participants = { - "page_count": 1, - "page_size": 30, - "total_records": 4, - "next_page_token": "", - "participants": [ - { - "id": "", - "user_id": "16778240", - "name": "Barack Obama", - "user_email": "", - "join_time": "2020-04-24T21:00:26Z", - "leave_time": "2020-04-24T22:24:38Z", - "duration": 5052, - "attentiveness_score": "", - }, - { - "id": "", - "user_id": "16779264", - "name": "", - "user_email": "", - "join_time": "2020-04-24T21:00:45Z", - "leave_time": "2020-04-24T22:24:38Z", - "duration": 5033, - "attentiveness_score": "", - }, - ], - } + 'page_count': 1, + 'page_size': 30, + 'total_records': 4, + 'next_page_token': '', + 'participants': [{ + 'id': '', + 'user_id': '16778240', + 'name': 'Barack Obama', + 'user_email': '', + 'join_time': '2020-04-24T21:00:26Z', + 'leave_time': '2020-04-24T22:24:38Z', + 'duration': 5052, + 'attentiveness_score': '' + }, { + 'id': '', + 'user_id': '16779264', + 'name': '', + 'user_email': '', + 'join_time': '2020-04-24T21:00:45Z', + 'leave_time': '2020-04-24T22:24:38Z', + 'duration': 5033, + 'attentiveness_score': '' + }]} - tbl = Table( - [ - { - "id": "", - "user_id": "16778240", - "name": "Barack Obama", - "user_email": "", - "join_time": "2020-04-24T21:00:26Z", - "leave_time": "2020-04-24T22:24:38Z", - "duration": 5052, - "attentiveness_score": "", - }, - { - "id": "", - "user_id": "16779264", - "name": "", - "user_email": "", - "join_time": "2020-04-24T21:00:45Z", - "leave_time": "2020-04-24T22:24:38Z", - "duration": 5033, - "attentiveness_score": "", - }, - ] - ) + tbl = Table([{ + 'id': '', + 'user_id': '16778240', + 'name': 'Barack Obama', + 'user_email': '', + 'join_time': '2020-04-24T21:00:26Z', + 'leave_time': '2020-04-24T22:24:38Z', + 'duration': 5052, + 'attentiveness_score': '' + }, { + 'id': '', + 'user_id': '16779264', + 'name': '', + 'user_email': '', + 'join_time': '2020-04-24T21:00:45Z', + 'leave_time': '2020-04-24T22:24:38Z', + 'duration': 5033, + 'attentiveness_score': ''}]) - m.post(ZOOM_AUTH_CALLBACK, json={"access_token": "fakeAccessToken"}) - m.get(ZOOM_URI + "report/meetings/123/participants", json=participants) + m.get(ZOOM_URI + 'report/meetings/123/participants', json=participants) assert_matching_tables(self.zoom.get_past_meeting_participants(123), tbl) @requests_mock.Mocker() def test_get_meeting_registrants(self, m): registrants = { - "page_count": 1, - "page_size": 30, - "total_records": 4, - "next_page_token": "", - "registrants": [ - { - "id": "", - "user_id": "16778240", - "name": "Barack Obama", - "user_email": "", - "purchasing_time_frame": "Within a month", - "role_in_purchase_process": "Not involved", - }, - { - "id": "", - "user_id": "16779264", - "name": "", - "user_email": "", - "purchasing_time_frame": "Within a month", - "role_in_purchase_process": "Not involved", - }, - ], - } + 'page_count': 1, + 
'page_size': 30, + 'total_records': 4, + 'next_page_token': '', + 'registrants': [{ + 'id': '', + 'user_id': '16778240', + 'name': 'Barack Obama', + 'user_email': '', + 'purchasing_time_frame': 'Within a month', + 'role_in_purchase_process': 'Not involved' + }, { + 'id': '', + 'user_id': '16779264', + 'name': '', + 'user_email': '', + 'purchasing_time_frame': 'Within a month', + 'role_in_purchase_process': 'Not involved' + }]} - tbl = Table( - [ - { - "id": "", - "user_id": "16778240", - "name": "Barack Obama", - "user_email": "", - "purchasing_time_frame": "Within a month", - "role_in_purchase_process": "Not involved", - }, - { - "id": "", - "user_id": "16779264", - "name": "", - "user_email": "", - "purchasing_time_frame": "Within a month", - "role_in_purchase_process": "Not involved", - }, - ] - ) + tbl = Table([ + { + 'id': '', + 'user_id': '16778240', + 'name': 'Barack Obama', + 'user_email': '', + 'purchasing_time_frame': 'Within a month', + 'role_in_purchase_process': 'Not involved' + }, + { + 'id': '', + 'user_id': '16779264', + 'name': '', + 'user_email': '', + 'purchasing_time_frame': 'Within a month', + 'role_in_purchase_process': 'Not involved' + } + ]) - m.post(ZOOM_AUTH_CALLBACK, json={"access_token": "fakeAccessToken"}) - m.get(ZOOM_URI + "meetings/123/registrants", json=registrants) + m.get(ZOOM_URI + 'meetings/123/registrants', json=registrants) assert_matching_tables(self.zoom.get_meeting_registrants(123), tbl) @requests_mock.Mocker() def test_get_user_webinars(self, m): webinars = { - "page_count": 1, - "page_size": 30, - "total_records": 4, - "next_page_token": "", - "webinars": [ - { - "uuid": "dsghfkhaewfds", - "id": "", - "host_id": "24654130000000", - "topic": "My Webinar", - "agenda": "Learn more about Zoom APIs", - "type": "5", - "duration": "60", - "start_time": "2019-09-24T22:00:00Z", - "timezone": "America/Los_Angeles", - "created_at": "2019-08-30T22:00:00Z", - "join_url": "https://zoom.us/0001000/awesomewebinar", - }, - { - "uuid": "dhf8as7dhf", - "id": "", - "host_id": "24654130000345", - "topic": "My Webinar", - "agenda": "Learn more about Zoom APIs", - "type": "5", - "duration": "60", - "start_time": "2019-09-24T22:00:00Z", - "timezone": "America/Los_Angeles", - "created_at": "2019-08-30T22:00:00Z", - "join_url": "https://zoom.us/0001000/awesomewebinar", - }, - ], - } + 'page_count': 1, + 'page_size': 30, + 'total_records': 4, + 'next_page_token': '', + 'webinars': [{ + "uuid": "dsghfkhaewfds", + "id": '', + "host_id": "24654130000000", + "topic": "My Webinar", + "agenda": "Learn more about Zoom APIs", + "type": "5", + "duration": "60", + "start_time": "2019-09-24T22:00:00Z", + "timezone": "America/Los_Angeles", + "created_at": "2019-08-30T22:00:00Z", + "join_url": "https://zoom.us/0001000/awesomewebinar" + }, { + "uuid": "dhf8as7dhf", + "id": '', + "host_id": "24654130000345", + "topic": "My Webinar", + "agenda": "Learn more about Zoom APIs", + "type": "5", + "duration": "60", + "start_time": "2019-09-24T22:00:00Z", + "timezone": "America/Los_Angeles", + "created_at": "2019-08-30T22:00:00Z", + "join_url": "https://zoom.us/0001000/awesomewebinar" + }]} - tbl = Table( - [ - { - "uuid": "dsghfkhaewfds", - "id": "", - "host_id": "24654130000000", - "topic": "My Webinar", - "agenda": "Learn more about Zoom APIs", - "type": "5", - "duration": "60", - "start_time": "2019-09-24T22:00:00Z", - "timezone": "America/Los_Angeles", - "created_at": "2019-08-30T22:00:00Z", - "join_url": "https://zoom.us/0001000/awesomewebinar", - }, - { - "uuid": "dhf8as7dhf", - "id": "", 
- "host_id": "24654130000345", - "topic": "My Webinar", - "agenda": "Learn more about Zoom APIs", - "type": "5", - "duration": "60", - "start_time": "2019-09-24T22:00:00Z", - "timezone": "America/Los_Angeles", - "created_at": "2019-08-30T22:00:00Z", - "join_url": "https://zoom.us/0001000/awesomewebinar", - }, - ] - ) + tbl = Table([ + { + "uuid": "dsghfkhaewfds", + "id": '', + "host_id": "24654130000000", + "topic": "My Webinar", + "agenda": "Learn more about Zoom APIs", + "type": "5", + "duration": "60", + "start_time": "2019-09-24T22:00:00Z", + "timezone": "America/Los_Angeles", + "created_at": "2019-08-30T22:00:00Z", + "join_url": "https://zoom.us/0001000/awesomewebinar" + }, + { + "uuid": "dhf8as7dhf", + "id": '', + "host_id": "24654130000345", + "topic": "My Webinar", + "agenda": "Learn more about Zoom APIs", + "type": "5", + "duration": "60", + "start_time": "2019-09-24T22:00:00Z", + "timezone": "America/Los_Angeles", + "created_at": "2019-08-30T22:00:00Z", + "join_url": "https://zoom.us/0001000/awesomewebinar" + } + ]) - m.post(ZOOM_AUTH_CALLBACK, json={"access_token": "fakeAccessToken"}) - m.get(ZOOM_URI + "users/123/webinars", json=webinars) + m.get(ZOOM_URI + 'users/123/webinars', json=webinars) assert_matching_tables(self.zoom.get_user_webinars(123), tbl) @requests_mock.Mocker() def test_get_past_webinar_participants(self, m): participants = { - "page_count": 1, - "page_size": 30, - "total_records": 4, - "next_page_token": "", - "participants": [ - { - "id": "", - "user_id": "sdfjkldsf87987", - "name": "Barack", - "user_email": "riya@sdfjkldsf87987.fdjfhdf", - "join_time": "2019-02-01T12:34:12.660Z", - "leave_time": "2019-03-01T12:34:12.660Z", - "duration": "20", - }, - { - "id": "", - "user_id": "sdfjkldsfdfgdfg", - "name": "Joe", - "user_email": "riya@sdfjkldsf87987.fdjfhdf", - "join_time": "2019-02-01T12:34:12.660Z", - "leave_time": "2019-03-01T12:34:12.660Z", - "duration": "20", - }, - ], - } + 'page_count': 1, + 'page_size': 30, + 'total_records': 4, + 'next_page_token': '', + 'participants': [{ + "id": "", + "user_id": "sdfjkldsf87987", + "name": "Barack", + "user_email": "riya@sdfjkldsf87987.fdjfhdf", + "join_time": "2019-02-01T12:34:12.660Z", + "leave_time": "2019-03-01T12:34:12.660Z", + "duration": "20" + }, { + "id": "", + "user_id": "sdfjkldsfdfgdfg", + "name": "Joe", + "user_email": "riya@sdfjkldsf87987.fdjfhdf", + "join_time": "2019-02-01T12:34:12.660Z", + "leave_time": "2019-03-01T12:34:12.660Z", + "duration": "20" + }]} - tbl = Table( - [ - { - "id": "", - "user_id": "sdfjkldsf87987", - "name": "Barack", - "user_email": "riya@sdfjkldsf87987.fdjfhdf", - "join_time": "2019-02-01T12:34:12.660Z", - "leave_time": "2019-03-01T12:34:12.660Z", - "duration": "20", - }, - { - "id": "", - "user_id": "sdfjkldsfdfgdfg", - "name": "Joe", - "user_email": "riya@sdfjkldsf87987.fdjfhdf", - "join_time": "2019-02-01T12:34:12.660Z", - "leave_time": "2019-03-01T12:34:12.660Z", - "duration": "20", - }, - ] - ) + tbl = Table([ + { + "id": "", + "user_id": "sdfjkldsf87987", + "name": "Barack", + "user_email": "riya@sdfjkldsf87987.fdjfhdf", + "join_time": "2019-02-01T12:34:12.660Z", + "leave_time": "2019-03-01T12:34:12.660Z", + "duration": "20" + }, + { + "id": "", + "user_id": "sdfjkldsfdfgdfg", + "name": "Joe", + "user_email": "riya@sdfjkldsf87987.fdjfhdf", + "join_time": "2019-02-01T12:34:12.660Z", + "leave_time": "2019-03-01T12:34:12.660Z", + "duration": "20" + } + ]) - m.post(ZOOM_AUTH_CALLBACK, json={"access_token": "fakeAccessToken"}) - m.get(ZOOM_URI + 
"report/webinars/123/participants", json=participants) + m.get(ZOOM_URI + 'report/webinars/123/participants', json=participants) assert_matching_tables(self.zoom.get_past_webinar_participants(123), tbl) @requests_mock.Mocker() def test_get_webinar_registrants(self, m): registrants = { - "page_count": 1, - "page_size": 30, - "total_records": 4, - "next_page_token": "", - "registrants": [ - { - "id": "", - "email": "barack@obama.com", - "first_name": "Barack", - "last_name": "Obama", - "address": "dsfhkdjsfh st", - "city": "jackson heights", - "country": "US", - "zip": "11371", - "state": "NY", - "phone": "00000000", - "industry": "Food", - "org": "Cooking Org", - "job_title": "Chef", - "purchasing_time_frame": "1-3 months", - "role_in_purchase_process": "Influencer", - "no_of_employees": "10", - "comments": "Looking forward to the Webinar", - "custom_questions": [ - { - "title": "What do you hope to learn from this Webinar?", - "value": "Look forward to learning how you come up with recipes and services", # noqa: E501 - } - ], - "status": "approved", - "create_time": "2019-02-26T23:01:16.899Z", - "join_url": "https://zoom.us/webinar/mywebinariscool", - }, - { - "id": "", - "email": "joe@biden.com", - "first_name": "Joe", - "last_name": "Biden", - "address": "dsfhkdjsfh st", - "city": "jackson heights", - "country": "US", - "zip": "11371", - "state": "NY", - "phone": "00000000", - "industry": "Food", - "org": "Cooking Org", - "job_title": "Chef", - "purchasing_time_frame": "1-3 months", - "role_in_purchase_process": "Influencer", - "no_of_employees": "10", - "comments": "Looking forward to the Webinar", - "custom_questions": [ - { - "title": "What do you hope to learn from this Webinar?", - "value": "Look forward to learning how you come up with recipes and services", # noqa: E501 - } - ], - "status": "approved", - "create_time": "2019-02-26T23:01:16.899Z", - "join_url": "https://zoom.us/webinar/mywebinariscool", - }, - ], - } + 'page_count': 1, + 'page_size': 30, + 'total_records': 4, + 'next_page_token': '', + 'registrants': [{ + "id": "", + "email": "barack@obama.com", + "first_name": "Barack", + "last_name": "Obama", + "address": "dsfhkdjsfh st", + "city": "jackson heights", + "country": "US", + "zip": "11371", + "state": "NY", + "phone": "00000000", + "industry": "Food", + "org": "Cooking Org", + "job_title": "Chef", + "purchasing_time_frame": "1-3 months", + "role_in_purchase_process": "Influencer", + "no_of_employees": "10", + "comments": "Looking forward to the Webinar", + "custom_questions": [ + { + "title": "What do you hope to learn from this Webinar?", + "value": "Look forward to learning how you come up with recipes and services" + } + ], + "status": "approved", + "create_time": "2019-02-26T23:01:16.899Z", + "join_url": "https://zoom.us/webinar/mywebinariscool" + }, { + "id": "", + "email": "joe@biden.com", + "first_name": "Joe", + "last_name": "Biden", + "address": "dsfhkdjsfh st", + "city": "jackson heights", + "country": "US", + "zip": "11371", + "state": "NY", + "phone": "00000000", + "industry": "Food", + "org": "Cooking Org", + "job_title": "Chef", + "purchasing_time_frame": "1-3 months", + "role_in_purchase_process": "Influencer", + "no_of_employees": "10", + "comments": "Looking forward to the Webinar", + "custom_questions": [ + { + "title": "What do you hope to learn from this Webinar?", + "value": "Look forward to learning how you come up with recipes and services" + } + ], + "status": "approved", + "create_time": "2019-02-26T23:01:16.899Z", + "join_url": 
"https://zoom.us/webinar/mywebinariscool" + }]} - tbl = Table( - [ - { - "id": "", - "email": "barack@obama.com", - "first_name": "Barack", - "last_name": "Obama", - "address": "dsfhkdjsfh st", - "city": "jackson heights", - "country": "US", - "zip": "11371", - "state": "NY", - "phone": "00000000", - "industry": "Food", - "org": "Cooking Org", - "job_title": "Chef", - "purchasing_time_frame": "1-3 months", - "role_in_purchase_process": "Influencer", - "no_of_employees": "10", - "comments": "Looking forward to the Webinar", - "custom_questions": [ - { - "title": "What do you hope to learn from this Webinar?", - "value": "Look forward to learning how you come up with recipes and services", # noqa: E501 - } - ], - "status": "approved", - "create_time": "2019-02-26T23:01:16.899Z", - "join_url": "https://zoom.us/webinar/mywebinariscool", - }, - { - "id": "", - "email": "joe@biden.com", - "first_name": "Joe", - "last_name": "Biden", - "address": "dsfhkdjsfh st", - "city": "jackson heights", - "country": "US", - "zip": "11371", - "state": "NY", - "phone": "00000000", - "industry": "Food", - "org": "Cooking Org", - "job_title": "Chef", - "purchasing_time_frame": "1-3 months", - "role_in_purchase_process": "Influencer", - "no_of_employees": "10", - "comments": "Looking forward to the Webinar", - "custom_questions": [ - { - "title": "What do you hope to learn from this Webinar?", - "value": "Look forward to learning how you come up with recipes and services", # noqa: E501 - } - ], - "status": "approved", - "create_time": "2019-02-26T23:01:16.899Z", - "join_url": "https://zoom.us/webinar/mywebinariscool", - }, - ] - ) + tbl = Table([ + { + "id": "", + "email": "barack@obama.com", + "first_name": "Barack", + "last_name": "Obama", + "address": "dsfhkdjsfh st", + "city": "jackson heights", + "country": "US", + "zip": "11371", + "state": "NY", + "phone": "00000000", + "industry": "Food", + "org": "Cooking Org", + "job_title": "Chef", + "purchasing_time_frame": "1-3 months", + "role_in_purchase_process": "Influencer", + "no_of_employees": "10", + "comments": "Looking forward to the Webinar", + "custom_questions": [ + { + "title": "What do you hope to learn from this Webinar?", + "value": "Look forward to learning how you come up with recipes and services" + } + ], + "status": "approved", + "create_time": "2019-02-26T23:01:16.899Z", + "join_url": "https://zoom.us/webinar/mywebinariscool" + }, + { + "id": "", + "email": "joe@biden.com", + "first_name": "Joe", + "last_name": "Biden", + "address": "dsfhkdjsfh st", + "city": "jackson heights", + "country": "US", + "zip": "11371", + "state": "NY", + "phone": "00000000", + "industry": "Food", + "org": "Cooking Org", + "job_title": "Chef", + "purchasing_time_frame": "1-3 months", + "role_in_purchase_process": "Influencer", + "no_of_employees": "10", + "comments": "Looking forward to the Webinar", + "custom_questions": [ + { + "title": "What do you hope to learn from this Webinar?", + "value": "Look forward to learning how you come up with recipes and services" + } + ], + "status": "approved", + "create_time": "2019-02-26T23:01:16.899Z", + "join_url": "https://zoom.us/webinar/mywebinariscool" + } + ]) - m.post(ZOOM_AUTH_CALLBACK, json={"access_token": "fakeAccessToken"}) - m.get(ZOOM_URI + "webinars/123/registrants", json=registrants) + m.get(ZOOM_URI + 'webinars/123/registrants', json=registrants) assert_matching_tables(self.zoom.get_webinar_registrants(123), tbl) diff --git a/test/utils.py b/test/utils.py index ffe208cfed..9e6f8e47d7 100644 --- a/test/utils.py 
+++ b/test/utils.py @@ -13,15 +13,14 @@ def test_something(): ... """ mark_live_test = pytest.mark.skipif( - not os.environ.get("LIVE_TEST"), reason="Skipping because not running live test" -) + not os.environ.get('LIVE_TEST'), reason='Skipping because not running live test') # Tests whether a table has the expected structure def validate_list(expected_keys, table): if set(expected_keys) != set(table.columns): - raise KeyError("Not all expected keys found.") + raise KeyError('Not all expected keys found.') return True @@ -38,11 +37,5 @@ def assert_matching_tables(table1, table2, ignore_headers=False): assert data1.num_rows == data2.num_rows for r1, r2 in zip(data1, data2): - # Cast both rows to lists, in case they are different types of collections. Must call - # .items() on dicts to compare content of collections - if isinstance(r1, dict): - r1 = r1.items() - if isinstance(r2, dict): - r2 = r2.items() - + # Cast both rows to lists, in case they are different types of collections assert list(r1) == list(r2) diff --git a/useful_resources/sample_code/README.md b/useful_resources/sample_code/README.md index 2d6e1811e9..064cc0baa7 100644 --- a/useful_resources/sample_code/README.md +++ b/useful_resources/sample_code/README.md @@ -12,15 +12,12 @@ Please also add your new script to the table below. # Existing Scripts -| File Name | Brief Description | Connectors Used | Written For Parsons Version | +| File Name | Brief Description | Connectors Used | Written For Parsons Version | | --------------------------- | ------------------------------------------------------------------------------ | --------------------- | --------------------------- | -| actblue_to_google_sheets.py | Get information about contributions from ActBlue and put in a new Google Sheet | ActBlue, GoogleSheets | 0.18.0 | -| apply_activist_code.py | Gets activist codes stored in Redshift and applies to users in Van | Redshift, VAN | unknown | -| civis_job_status_slack_alert.py | Posts Civis job and workflow status alerts in Slack | Slack | unknown | -| mysql_to_googlesheets.py | Queries a MySQL database and saves the results to a Google Sheet | GoogleSheets, MySQL | unknown | -| ngpvan_sample_list.py | Creates a new saved list from a random sample of an existing saved list in VAN | VAN | unknown | -| opt_outs_everyaction.py | Opts out phone numbers in EveryAction from a Redshift table | Redshift, VAN | 0.21.0 | -| s3_to_redshift.py | Moves files from S3 to Redshift | Redshift, S3 | unknown | -| s3_to_s3.py | Get files from vendor s3 bucket and moves to own S3 bucket | S3 | unknown | -| update_user_in_actionkit.py | Adds a voterbase_id (the Targetsmart ID) to users in ActionKit | Redshift, ActionKit | unknown | -| zoom_to_van.py | Adds Zoom attendees to VAN and applies an activist code | Zoom, VAN | 0.15.0 | +| apply_activist_code.py | Gets activist codes stored in Redshift and applies to users in Van | Redshift, VAN | unknown | +| s3_to_redshift.py | Moves files from S3 to Redshift | Redshift, S3 | unknown | +| s3_to_s3.py | Get files from vendor s3 bucket and moves to own S3 bucket | S3 | unknown | +| update_user_in_actionkit.py | Adds a voterbase_id (the Targetsmart ID) to users in ActionKit | Redshift, ActionKit | unknown | +| zoom_to_van.py | Adds Zoom attendees to VAN and applies an activist code | Zoom, VAN | 0.15.0 | +| ngpvan_sample_list.py | Creates a new saved list from a random sample of an existing saved list in VAN | VAN | unknown | +| actblue_to_google_sheets.py | Get information about contributions from ActBlue and 
put in a new Google Sheet | ActBlue, GoogleSheets | 0.18.0 | diff --git a/useful_resources/sample_code/actblue_to_google_sheets.py b/useful_resources/sample_code/actblue_to_google_sheets.py index da079678ce..5e090c8e95 100644 --- a/useful_resources/sample_code/actblue_to_google_sheets.py +++ b/useful_resources/sample_code/actblue_to_google_sheets.py @@ -15,7 +15,7 @@ # Connector 2: # Visit https://console.developers.google.com to create a Service Account and download its # credentials in a json file. Provide the path to that file for this configuration. - "GOOGLE_DRIVE_CREDENTIALS": "", + "GOOGLE_DRIVE_CREDENTIALS": "" } @@ -23,13 +23,12 @@ # Setup -import os # noqa E402 - +import os # noqa E402 os.environ["PARSONS_SKIP_IMPORT_ALL"] = "1" -from parsons import logger # noqa E402 -from parsons.actblue import ActBlue # noqa E402 -from parsons.google.google_sheets import GoogleSheets # noqa E402 +from parsons import logger # noqa E402 +from parsons.actblue import ActBlue# noqa E402 +from parsons.google.google_sheets import GoogleSheets # noqa E402 # if variables specified above, sets them as environmental variables for name, value in config_vars.items(): @@ -46,27 +45,21 @@ # already exists. The Google Sheets connector will create the new spreadsheet, with 0 rows, and # then overwrite the empty spreadsheet with your data. This script will not overwrite or update # existing Google Sheets. -spreadsheet_name = "Contributions - January" +spreadsheet_name = 'Contributions - January' # This script will create a Google Sheet with Restriced permissions -- only people specifically # added can open it. The account specifed in the Google credentials file will be the owner of # the new spreadsheet. If you created a Service Account, the Service Account will be the owner. # In this case, to receive a link to the spreadsheet and editor permissions, you must provide # your email address. Once the sheet has been created you may add user permissions in Google Sheets. -editor_email = "" +editor_email = '' if not editor_email: - raise ValueError( - "editor_email is required to enable access to the new Google Sheet" - ) + raise ValueError("editor_email is required to enable access to the new Google Sheet") # Step 2: Specify what contribution data you want from ActBlue -date_range_start = ( - "2022-01-01" # Start of date range to withdraw contribution data (inclusive). -) -date_range_end = ( - "2022-02-01" # End of date range to withdraw contribution data (exclusive). -) -csv_type = "paid_contributions" +date_range_start = '2022-01-01' # Start of date range to withdraw contribution data (inclusive). +date_range_end = '2022-02-01' # End of date range to withdraw contribution data (exclusive). +csv_type = 'paid_contributions' # csv_type options: # 'paid_contributions': # contains paid, non-refunded contributions to the entity (campaign or organization) you @@ -79,9 +72,7 @@ # form. # Step 3: Retrieve data from ActBlue and hold it in a Parsons Table. 
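# --- Illustrative aside (editorial; not part of the patch content around it) ---
# A minimal sketch of the fetch-then-publish pattern this sample script walks
# through, assuming `actblue` and `google_sheets` are the connector instances
# created earlier in the script. The empty-table guard and the call to
# overwrite_sheet() (seen in the MySQL sample later in this patch) are
# illustrative assumptions, not the script's literal Step 3/4 code.
def fetch_and_publish(csv_type, date_range_start, date_range_end,
                      spreadsheet_name, editor_email):
    # Pull the requested contribution CSV into a Parsons Table.
    contributions = actblue.get_contributions(csv_type, date_range_start, date_range_end)
    # Guard against writing an empty spreadsheet if the window had no data.
    if contributions.num_rows == 0:
        raise ValueError("No contributions returned for the given date range")
    # Create the sheet, grant the human editor access, then write the rows.
    sheet_id = google_sheets.create_spreadsheet(spreadsheet_name, editor_email=editor_email)
    google_sheets.overwrite_sheet(sheet_id, contributions)
    return sheet_id
# --- End aside ---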
-contribution_data = actblue.get_contributions( - csv_type, date_range_start, date_range_end -) +contribution_data = actblue.get_contributions(csv_type, date_range_start, date_range_end) # Step 4: Create a spreadsheet on Google Sheets sheet_id = google_sheets.create_spreadsheet(spreadsheet_name, editor_email=editor_email) diff --git a/useful_resources/sample_code/apply_activist_code.py b/useful_resources/sample_code/apply_activist_code.py index 1ced1353b3..f9f15e0a41 100644 --- a/useful_resources/sample_code/apply_activist_code.py +++ b/useful_resources/sample_code/apply_activist_code.py @@ -20,7 +20,7 @@ "REDSHIFT_CREDENTIAL_PASSWORD": "", # Van "VAN_PASSWORD": "", - "VAN_DB_NAME": "", + "VAN_DB_NAME": "" } @@ -32,20 +32,16 @@ # Setup -for name, value in config_vars.items(): # sets variables if provided in this script +for name, value in config_vars.items(): # sets variables if provided in this script if value.strip() != "": os.environ[name] = value -rs = Redshift() # just create Redshift() - VAN connector is created dynamically below +rs = Redshift() # just create Redshift() - VAN connector is created dynamically below # Create dictionary of VAN states and API keys from multiline Civis credential -myv_states = { - x.split(",")[0]: x.split(",")[1] for x in os.environ["VAN_PASSWORD"].split("\r\n") -} -myv_keys = { - k: VAN(api_key=v, db=os.environ["VAN_DB_NAME"]) for k, v in myv_states.items() -} +myv_states = {x.split(",")[0]: x.split(",")[1] for x in os.environ['VAN_PASSWORD'].split("\r\n")} +myv_keys = {k: VAN(api_key=v, db=os.environ['VAN_DB_NAME']) for k, v in myv_states.items()} # Create simple set of states for insertion into SQL states = "','".join([s for s in myv_keys]) @@ -72,6 +68,4 @@ for vanid in state_set: # TODO: row undefined, select row form record? row = None - key.toggle_activist_code( - row["vb_smartvan_id"], row["activist_code_id"], "apply" - ) + key.toggle_activist_code(row['vb_smartvan_id'], row['activist_code_id'], 'apply') diff --git a/useful_resources/sample_code/civis_job_status_slack_alert.py b/useful_resources/sample_code/civis_job_status_slack_alert.py deleted file mode 100644 index 16504d838c..0000000000 --- a/useful_resources/sample_code/civis_job_status_slack_alert.py +++ /dev/null @@ -1,148 +0,0 @@ -# This script checks the status of all jobs and workflows in a given Civis Project -# and posts them to a Slack channel. - -import civis -import datetime -import logging -from parsons import Slack, Table - -# Environment variables -# To use the Civis connector, set the environment variables CIVIS_DATABASE and CIVIS_API_KEY. -# These environment variables are not necessary if you run this code in a Civis container script. -client = civis.APIClient() -# To use the Slack connector, set the environment variable SLACK_API_TOKEN -slack = Slack() -# More on environmental variables: -# https://move-coop.github.io/parsons/html/use_cases/contribute_use_cases.html#sensitive-information - -# Configuration variables -SLACK_CHANNEL = "" # Slack channel where the alert will post. -CIVIS_PROJECT = ( - "" # ID of the Civis project with jobs and workflows you want to see the status of. -) - -logger = logging.getLogger(__name__) -_handler = logging.StreamHandler() -_formatter = logging.Formatter("%(levelname)s %(message)s") -_handler.setFormatter(_formatter) -logger.addHandler(_handler) -logger.setLevel("INFO") - - -# Cleans up datetime format for posting to Slack. 
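# --- Illustrative aside (editorial; not part of the deleted script below) ---
# Context for the format_datetime() helper that follows: before Python 3.11,
# datetime.datetime.fromisoformat() rejects the trailing "Z" UTC designator,
# which is why the helper strips it before parsing. A version-tolerant variant
# (an assumption, not the original code) keeps the timezone instead:
import datetime

def format_datetime_tolerant(text):
    # Swap the "Z" suffix for an explicit UTC offset so fromisoformat()
    # accepts the string on older Python versions and stays timezone-aware.
    dt = datetime.datetime.fromisoformat(text.replace("Z", "+00:00"))
    return dt.strftime("%Y-%m-%d")
# --- End aside ---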
-def format_datetime(text):
-
-    formatted_text = text.replace("Z", "")
-    dt = datetime.datetime.fromisoformat(formatted_text)
-    return dt.strftime("%Y-%m-%d")
-
-
-# Assigns an emoji for each potential run status a Civis job or workflow might have.
-def get_run_state_emoji(run_state):
-
-    if run_state == "succeeded":
-        return ":white_check_mark:"
-    elif run_state == "failed":
-        return ":x:"
-    elif run_state == "running":
-        return ":runner:"
-    else:
-        return ":shrug:"
-
-
-# Returns a Parsons table with workflow and job data from the specified Civis project.
-def get_workflows_and_jobs(project_id):
-
-    project = client.projects.get(project_id)
-
-    # Get the workflow and job data from the project
-    workflows = [dict(workflow) for workflow in project["workflows"]]
-    table = Table(workflows)
-    # We need to distinguish between jobs and workflows later on,
-    # so we add a column noting these are workflows.
-    table.add_column("object_type", "workflow")
-
-    # Imports and other scripts are separated out in the response, but they are all treated
-    # as jobs, so we pull and combine them.
-    jobs = [dict(job) for job in project["scripts"]]
-    imports = [dict(import_job) for import_job in project["imports"]]
-    full_list = jobs + imports
-    jobs_table = Table(full_list)
-    jobs_table.add_column("object_type", "job")
-
-    # Here we combine the table of jobs and imports into the table of workflows.
-    # The object_type column lets us distinguish between the types,
-    # which is necessary for the get_last_success function.
-    table.concat(jobs_table)
-
-    return table
-
-
-# Returns the date and time of the last successful run for a Civis job or workflow.
-def get_last_success(object_id, object_type):
-
-    last_success = "-"
-
-    if object_type == "workflow":
-        workflow_executions = client.workflows.list_executions(
-            object_id, order="updated_at"
-        )
-        for execution in workflow_executions:
-            if execution["state"] != "succeeded":
-                continue
-            else:
-                last_success = format_datetime(execution["finished_at"])
-                break
-
-    elif object_type == "job":
-        job_runs = client.jobs.list_runs(object_id)
-        job_runs_tbl = Table([dict(run) for run in job_runs]).sort(
-            columns="finished_at", reverse=True
-        )
-        for run in job_runs_tbl:
-            if run["state"] != "succeeded":
-                continue
-            else:
-                last_success = format_datetime(run["finished_at"])
-                break
-
-    else:
-        logger.info(f"{object_type} is not a valid object type.")
-
-    return last_success
-
-
-def main():
-
-    project_name = client.projects.get(CIVIS_PROJECT)["name"]
-
-    scripts_table = get_workflows_and_jobs(CIVIS_PROJECT).sort(
-        columns=["state", "name"]
-    )
-
-    logger.info(
-        f"Found {scripts_table.num_rows} jobs and workflows in {project_name} project."
-    )
-
-    # This is a list of strings we will build with each job's status
-    output_lines = []
-
-    for run in scripts_table:
-        last_success = get_last_success(run["id"], run["object_type"])
-
-        output_line = f"""{get_run_state_emoji(run['state'])}
-        {run['name']} (last success: {last_success})"""
-        output_lines.append(output_line)
-
-    # Output our message to Slack
-    # Combine the list of statuses into one string
-    line_items = "\n".join(output_lines)
-    message = f"*{project_name} Status*\n{line_items}"
-    logger.info(f"Posting message to Slack channel {SLACK_CHANNEL}")
-    # Post message
-    slack.message_channel(SLACK_CHANNEL, message, as_user=True)
-    logger.info("Slack message posted")
-
-
-if __name__ == "__main__":
-    main()
diff --git a/useful_resources/sample_code/mysql_to_googlesheets.py b/useful_resources/sample_code/mysql_to_googlesheets.py
deleted file mode 100644
index fc4312de02..0000000000
--- a/useful_resources/sample_code/mysql_to_googlesheets.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# This script runs a query against a MySQL database and saves the results to a Google Sheet.
-
-import logging
-import time
-from parsons import GoogleSheets, MySQL
-from gspread.exceptions import APIError
-
-logger = logging.getLogger(__name__)
-_handler = logging.StreamHandler()
-_formatter = logging.Formatter("%(levelname)s %(message)s")
-_handler.setFormatter(_formatter)
-logger.addHandler(_handler)
-logger.setLevel("INFO")
-
-# To use the MySQL connector, set the environment variables:
-# MYSQL_USERNAME
-# MYSQL_PASSWORD
-# MYSQL_HOST
-# MYSQL_DB
-# MYSQL_PORT
-# To use the Google Sheets connector, set the GOOGLE_DRIVE_CREDENTIALS environment variable.
-# More on environmental variables:
-# https://move-coop.github.io/parsons/html/use_cases/contribute_use_cases.html#sensitive-information
-
-# Instantiate classes
-mysql = MySQL()
-gsheets = GoogleSheets()
-
-# Configuration Variables
-# FOLDER_ID is the ID of the Google Drive folder where the Google Sheets workbook will be created.
-FOLDER_ID = "enter_id_here"
-# TITLE is the name of the Google Sheets workbook the script will create.
-TITLE = "sheet_title_here"
-# TAB_LABEL is the name of the tab where your query results will appear in Google Sheets.
-TAB_LABEL = "tab_label_here"
-# QUERY is the SQL query we will run against the MySQL database.
-QUERY = """-- Enter SQL here"""
-
-
-# Function to add data to spreadsheet tab.
-# There is a limit to the number of calls per minute,
-# so we use request_count to set a maximum number of tries.
-def try_overwrite(table, request_count, sheet_id, tab_index):
-
-    try:
-        gsheets.overwrite_sheet(
-            sheet_id, table, worksheet=tab_index, user_entered_value=False
-        )
-
-    except APIError as e:
-        print(f"trying to overwrite {tab_index} for the {request_count}th time")
-        if request_count > 60:
-            raise APIError(e)
-        time.sleep(80)
-        request_count += 1
-        try_overwrite(table, request_count, sheet_id, tab_index)
-
-
-def main():
-    logger.info(f"Creating Google Sheets workbook called '{TITLE}'")
-
-    try:
-        new_sheet = gsheets.create_spreadsheet(
-            title=TITLE, editor_email=None, folder_id=FOLDER_ID
-        )
-        # If successful, new_sheet will be the spreadsheet's ID as a string
-        if isinstance(new_sheet, str):
-            logger.info(f"Successfully created sheet {TITLE}!")
-        # If we do not get a string back from the create_spreadsheet call
-        # then something went wrong. Print the response.
-        else:
-            logger.info(
-                f"create_spreadsheet did not return a sheet ID. Issue: {str(new_sheet)}"
-            )
-
-    # If we get an error when trying to create the spreadsheet, we print the error.
-    except Exception as e:
-        logger.info(
-            f"There was a problem creating the Google Sheets workbook! Error: {str(e)}"
-        )
-
-    logger.info("Querying MYSQL database...")
-    query_results = mysql.query(QUERY)
-
-    logger.info(
-        f"Querying complete. Preparing to load data into Google Sheets tab {TAB_LABEL}"
-    )
-    query_results.convert_columns_to_str()
-    request_count = 0
-    tab_index = gsheets.add_sheet(new_sheet, title=TAB_LABEL)
-    try_overwrite(query_results, request_count, sheet_id=new_sheet, tab_index=tab_index)
-    logger.info(f"Load into Google Sheets for tab {TAB_LABEL} complete!")
-
-
-if __name__ == "__main__":
-    main()
diff --git a/useful_resources/sample_code/opt_outs_everyaction.py b/useful_resources/sample_code/opt_outs_everyaction.py
deleted file mode 100644
index e6061e9e3a..0000000000
--- a/useful_resources/sample_code/opt_outs_everyaction.py
+++ /dev/null
@@ -1,211 +0,0 @@
-import os
-import requests
-import time
-import json
-from parsons import Redshift, Table, VAN
-from parsons import logger
-from datetime import datetime
-
-# Committee Information and Credentials
-
-# This script can be run against multiple EveryAction committees.
-# The COMMITTEES_STR variable should be a list of committee information in JSON format.
-# The information should include the committee's name, ID, and API key.
-# [{"committee": "Committee 1", "committee_id": "12345", "api_key": "Committee 1 API key"},
-# {"committee": "Committee 2", "committee_id": "56789", "api_key": "Committee 2 API key"}]
-# This script was originally written to run in Civis Platform, which pulls environment variables
-# in as strings.
-COMMITTEES_STR = os.environ["COMMITTEES_PASSWORD"]
-COMMITTEES = json.loads(COMMITTEES_STR)
-
-# Configuration Variables
-
-# It is assumed that the tables below live in a schema in Redshift.
-# Therefore, in order to interact with them, we must name them using the format schema.table.
-# More on this here: https://docs.aws.amazon.com/redshift/latest/gsg/t_creating_schema.html
-
-# The OPT_OUT_TABLE is a table of phones to opt out.
-# The variable must be a string with the format schema.table.
-# The table must contain the columns phone, committeeid, and vanid.
-OPT_OUT_TABLE = os.environ["OPT_OUT_TABLE"]
-
-# The SUCCESS_TABLE is a table where successful opt-outs will be logged.
-# The variable must be a string with the format schema.table.
-# This table's columns will be: vanid, phone, committeeid, and applied_at.
-SUCCESS_TABLE = os.environ["SUCCESS_TABLE"]
-
-# The ERROR_TABLE is a table where errors will be logged.
-# The variable must be a string with the format schema.table.
-# This table's columns will be: vanid, phone, committeeid, errored_at, and error.
-ERROR_TABLE = os.environ["ERROR_TABLE"]
-
-# To use the Redshift connector, set the following environmental variables:
-# REDSHIFT_USERNAME
-# REDSHIFT_PASSWORD
-# REDSHIFT_HOST
-# REDSHIFT_DB
-# REDSHIFT_PORT
-
-rs = Redshift()
-
-
-def attempt_optout(
-    every_action, row, applied_at, committeeid, success_log, error_log, attempts_left=3
-):
-
-    vanid = row["vanid"]
-    phone = row["phone"]
-
-    # Documentation on this json construction is here
-    # https://docs.ngpvan.com/reference/common-models
-    match_json = {"phones": [{"phoneNumber": phone, "phoneOptInStatus": "O"}]}
-
-    try:
-        response = every_action.update_person_json(id=vanid, match_json=match_json)
-
-        # If the response is a dictionary, the update was successful
-        if isinstance(response, dict):
-            success_log.append(
-                {
-                    "vanid": response.get("vanId"),
-                    "phone": phone,
-                    "committeeid": committeeid,
-                    "applied_at": applied_at,
-                }
-            )
-
-            return response
-
-    # If we get an HTTP error, add it to the error log.
-    # Usually these errors mean a vanid has been deleted from EveryAction.
-    except requests.exceptions.HTTPError as error:
-        error_message = str(error)[:999]
-        error_log.append(
-            {
-                "vanid": vanid,
-                "phone": phone,
-                "committeeid": committeeid,
-                "errored_at": applied_at,
-                "error": error_message,
-            }
-        )
-
-        return error_message
-
-    # If we get a connection error, we wait a bit and try again.
-    except requests.exceptions.ConnectionError as connection_error:
-        logger.info("Got disconnected, waiting and trying again")
-
-        while attempts_left > 0:
-            attempts_left -= 1
-
-            # Wait 10 seconds, then try again
-            time.sleep(10)
-            attempt_optout(every_action, row, applied_at, committeeid, success_log, error_log, attempts_left)
-
-        else:
-            # If we are still getting a connection error after our maximum number of attempts,
-            # we add the error to the log, save our full success and error logs in Redshift,
-            # and raise the error.
-            connection_error_message = str(connection_error)[:999]
-
-            error_log.append(
-                {
-                    "vanid": vanid,
-                    "phone": phone,
-                    "committeeid": committeeid,
-                    "errored_at": applied_at,
-                    "error": connection_error_message,
-                }
-            )
-
-            if len(success_log) > 0:
-                success_parsonstable = Table(success_log)
-                logger.info("Copying success data into log table...")
-                rs.copy(
-                    success_parsonstable,
-                    SUCCESS_TABLE,
-                    if_exists="append",
-                    alter_table=True,
-                )
-                logger.info("Success log complete.")
-
-            if len(error_log) > 0:
-                error_parsonstable = Table(error_log)
-                logger.info("Copying error data into log table...")
-                rs.copy(
-                    error_parsonstable,
-                    ERROR_TABLE,
-                    if_exists="append",
-                    alter_table=True,
-                )
-                logger.info("Error log complete.")
-
-            raise Exception(f"Connection Error {connection_error}")
-
-
-def main():
-    # Creating empty lists where we'll log successes and errors
-    success_log = []
-    error_log = []
-
-    # Get the opt out data
-    all_opt_outs = rs.query(f"select * from {OPT_OUT_TABLE}")
-
-    # Loop through each committee to opt out phones
-    for committee in COMMITTEES:
-
-        api_key = committee["api_key"]
-        committeeid = committee["committee_id"]
-        committee_name = committee["committee"]
-
-        every_action = VAN(db="EveryAction", api_key=api_key)
-
-        logger.info(f"Working on opt outs in {committee_name} committee...")
-
-        # Here we narrow the all_opt_outs table to only the rows that correspond
-        # to this committee.
-        opt_outs = all_opt_outs.select_rows(
-            lambda row: str(row.committeeid) == committeeid
-        )
-
-        logger.info(
-            f"Found {opt_outs.num_rows} phones to opt out in {committee_name} committee..."
-        )
-
-        # Now we actually update the records
-
-        if opt_outs.num_rows > 0:
-
-            for opt_out in opt_outs:
-
-                applied_at = str(datetime.now()).split(".")[0]
-                attempt_optout(
-                    every_action,
-                    opt_out,
-                    applied_at,
-                    committeeid,
-                    success_log,
-                    error_log,
-                )
-
-    # Now we log results
-    logger.info(f"There were {len(success_log)} successes and {len(error_log)} errors.")
-
-    if len(success_log) > 0:
-        success_parsonstable = Table(success_log)
-        logger.info("Copying success data into log table...")
-        rs.copy(
-            success_parsonstable, SUCCESS_TABLE, if_exists="append", alter_table=True
-        )
-        logger.info("Success log complete.")
-
-    if len(error_log) > 0:
-        error_parsonstable = Table(error_log)
-        logger.info("Copying error data into log table...")
-        rs.copy(error_parsonstable, ERROR_TABLE, if_exists="append", alter_table=True)
-        logger.info("Error log complete.")
-
-
-if __name__ == "__main__":
-    main()
diff --git a/useful_resources/sample_code/s3_to_redshift.py b/useful_resources/sample_code/s3_to_redshift.py
index 1917966395..bbfeb54ac5 100644
--- a/useful_resources/sample_code/s3_to_redshift.py
+++ b/useful_resources/sample_code/s3_to_redshift.py
@@ -32,7 +32,7 @@
 # Setup

-for name, value in config_vars.items():  # sets variables if provided in this script
+for name, value in config_vars.items(): # sets variables if provided in this script
     if value.strip() != "":
         os.environ[name] = value
@@ -41,7 +41,7 @@
 # Code

-bucket = os.environ["BUCKET"]
+bucket = os.environ['BUCKET']
 keys = s3.list_keys(bucket)
 files = keys.keys()
@@ -56,7 +56,7 @@
     table = Table.from_csv(file, encoding="ISO-8859-1")
     table_name = f"schema.{x.replace('.csv', '')}"
     try:
-        table.to_redshift(table_name, if_exists="truncate")
+        table.to_redshift(table_name, if_exists='truncate')
     except Exception:
-        table.to_redshift(table_name, if_exists="drop")
+        table.to_redshift(table_name, if_exists='drop')
     utilities.files.close_temp_file(file)
diff --git a/useful_resources/sample_code/s3_to_s3.py b/useful_resources/sample_code/s3_to_s3.py
index 10509d59db..80fb7e3963 100644
--- a/useful_resources/sample_code/s3_to_s3.py
+++ b/useful_resources/sample_code/s3_to_s3.py
@@ -11,12 +11,12 @@
 # these with empty strings. We recommend using environmental variables if possible.
 config_vars = {
-    # S3 (source)
-    "AWS_SOURCE_ACCESS_KEY_ID": "",
-    "AWS_SOURCE_SECRET_ACCESS_KEY": "",
-    # S3 (destination)
-    "AWS_DESTINATION_SECRET_ACCESS_KEY": "",
-    "AWS_DESTINATION_ACCESS_KEY_ID": "",
+    # S3 (source)
+    "AWS_SOURCE_ACCESS_KEY_ID": "",
+    "AWS_SOURCE_SECRET_ACCESS_KEY": "",
+    # S3 (destination)
+    'AWS_DESTINATION_SECRET_ACCESS_KEY': "",
+    'AWS_DESTINATION_ACCESS_KEY_ID': ""
 }

 DESTINATION_BUCKET = None
@@ -29,17 +29,13 @@
 # Setup

-for name, value in config_vars.items():  # sets variables if provided in this script
+for name, value in config_vars.items(): # sets variables if provided in this script
     if value.strip() != "":
         os.environ[name] = value

-s3_source = S3(
-    os.environ["AWS_SOURCE_ACCESS_KEY_ID"], os.environ["AWS_SOURCE_SECRET_ACCESS_KEY"]
-)
+s3_source = S3(os.environ['AWS_SOURCE_ACCESS_KEY_ID'], os.environ['AWS_SOURCE_SECRET_ACCESS_KEY'])
 s3_destination = S3(
-    os.environ["AWS_DESTINATION_ACCESS_KEY_ID"],
-    os.environ["AWS_DESTINATION_SECRET_ACCESS_KEY"],
-)
+    os.environ['AWS_DESTINATION_ACCESS_KEY_ID'], os.environ['AWS_DESTINATION_SECRET_ACCESS_KEY'])

 # Let's write some code!
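The s3_to_s3.py sample above stops at its "Let's write some code!" stub. As a minimal sketch of what the copy loop could look like (not part of the diff itself): SOURCE_BUCKET below is a hypothetical placeholder, DESTINATION_BUCKET is the variable the sample defines, and list_keys, get_file, and put_file are methods on the Parsons S3 connector; check the signatures against your installed Parsons version before relying on this.

SOURCE_BUCKET = "my-source-bucket"  # hypothetical name; fill in a real bucket

for key in s3_source.list_keys(SOURCE_BUCKET):
    # get_file downloads the object to a local temp file and returns its path
    local_path = s3_source.get_file(SOURCE_BUCKET, key)
    # put_file uploads the local copy to the destination bucket under the same key
    s3_destination.put_file(DESTINATION_BUCKET, key, local_path)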
diff --git a/useful_resources/sample_code/template_script.py b/useful_resources/sample_code/template_script.py
index b4deb15156..bff35e9617 100644
--- a/useful_resources/sample_code/template_script.py
+++ b/useful_resources/sample_code/template_script.py
@@ -15,7 +15,7 @@
     # Connector 1:
     "EXAMPLE_VARIABLE_NAME": "",
     # Connector 2:
-    "ANOTHER_EXAMPLE_VARIABLE_NAME": "",
+    "ANOTHER_EXAMPLE_VARIABLE_NAME": ""
 }
diff --git a/useful_resources/sample_code/update_user_in_actionkit.py b/useful_resources/sample_code/update_user_in_actionkit.py
index 95f546025c..cfbca34201 100644
--- a/useful_resources/sample_code/update_user_in_actionkit.py
+++ b/useful_resources/sample_code/update_user_in_actionkit.py
@@ -20,7 +20,7 @@
     # ActionKit
     "AK_USERNAME": "",
     "AK_PASSWORD": "",
-    "AK_DOMAIN": "",
+    "AK_DOMAIN": ""
 }

 # ### CODE
@@ -31,7 +31,7 @@
 # Setup

-for name, value in config_vars.items():  # sets variables if provided in this script
+for name, value in config_vars.items(): # sets variables if provided in this script
     if value.strip() != "":
         os.environ[name] = value
@@ -43,37 +43,35 @@
 # timestamp to be used for log table
 timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

-loaded = [["id", "voterbase_id", "date_updated"]]  # column names for log table
+loaded = [['id', 'voterbase_id', 'date_updated']] # column names for log table

-source_table = (
-    "schema.table"  # this is the table with the information I'm pushing to ActionKit
-)
+source_table = 'schema.table' # this is the table with the information I'm pushing to ActionKit

 # this is where we will log every user id that gets marked with a voterbase_id
-log_table = "schema.table"
+log_table = 'schema.table'

 logger.info("Running query to get matches...")

-query = """
+query = f'''
 select distinct id, voterbase_id
 from {source_table}
 left join {log_table} using (id, voterbase_id)
 where voterbase_id is not null
 and date_updated is null
-    """
+    '''

 source_data = rs.query(query)

 if source_data.num_rows > 0:
     logger.info(f"Will be updating voterbase_id for {source_data.num_rows}...")
     for row in source_data:
-        user = ak.get_user(user_id=row["id"])
-        user_dict = {"fields": {"vb_voterbase_id": row["voterbase_id"]}}
-        update_user = ak.update_user(user_id=row["id"], **user_dict)
-        user = ak.get_user(user_id=row["id"])
-        if user["fields"]["vb_voterbase_id"] == row["voterbase_id"]:
-            loaded.append([row["id"], row["voterbase_id"], timestamp])
+        user = ak.get_user(user_id=row['id'])
+        user_dict = {"fields": {"vb_voterbase_id": row['voterbase_id']}}
+        update_user = ak.update_user(user_id=row['id'], **user_dict)
+        user = ak.get_user(user_id=row['id'])
+        if user['fields']['vb_voterbase_id'] == row['voterbase_id']:
+            loaded.append([row['id'], row['voterbase_id'], timestamp])

     logger.info("Done with loop! Loading into log table...")
-    Table(loaded).to_redshift(log_table, if_exists="append")
+    Table(loaded).to_redshift(log_table, if_exists='append')

 else:
     logger.info("No one to update today...")
diff --git a/useful_resources/sample_code/zoom_to_van.py b/useful_resources/sample_code/zoom_to_van.py
index a1bad645ab..4ae7178806 100644
--- a/useful_resources/sample_code/zoom_to_van.py
+++ b/useful_resources/sample_code/zoom_to_van.py
@@ -13,17 +13,13 @@
     "ZOOM_API_KEY": "",
     "ZOOM_API_SECRET": "",
     # Van Authentication
-    "VAN_API_KEY": "",
+    "VAN_API_KEY": ""
 }

 VAN_DB = "MyCampaign"  # one of: MyMembers, EveryAction, MyCampaign (not MyVoters)
-ACTIVIST_CODE_NAME = (
-    ""  # name of VAN activist code, which must be created manually in VAN
-)
+ACTIVIST_CODE_NAME = "" # name of VAN activist code, which must be created manually in VAN
 ZOOM_MEETING_ID = ""
-MINIMUM_DURATION = (
-    0  # filters out Zoom participants who stayed for less than minimum duration
-)
+MINIMUM_DURATION = 0 # filters out Zoom participants who stayed for less than minimum duration

 # ### CODE
@@ -44,39 +40,32 @@
 # Gets participants from Zoom meeting
 participants = zoom.get_past_meeting_participants(ZOOM_MEETING_ID)
-filtered_participants = participants.select_rows(
-    lambda row: row.duration > MINIMUM_DURATION
-)
+filtered_participants = participants.select_rows(lambda row: row.duration > MINIMUM_DURATION)

 # Coalesce the columns into something VAN expects
-column_map = {
-    "first_name": ["fn", "first", "firstname", "first name"],
-    "last_name": ["ln", "last", "lastname", "last name"],
-    "date_of_birth": ["dob", "date of birth", "birthday"],
-    "email": ["email address", "email_address"],
-    "street_number": ["street number", "street no.", "street no", "street #"],
-    "street_name": ["street name", "street"],
-    "phone": ["phone_number", "phone #", "phone_#", "phone no.", "phone no"],
-    "zip": ["zip5", "zipcode", "zip code"],
-}
+column_map = {"first_name": ["fn", "first", "firstname", "first name"],
+              "last_name": ["ln", "last", "lastname", "last name"],
+              "date_of_birth": ["dob", "date of birth", "birthday"],
+              "email": ["email address", "email_address"],
+              "street_number": ["street number", "street no.", "street no", "street #"],
+              "street_name": ["street name", "street"],
+              "phone": ["phone_number", "phone #", "phone_#", "phone no.", "phone no"],
+              "zip": ["zip5", "zipcode", "zip code"]}
 filtered_participants.map_and_coalesce_columns(column_map)

 # Gets activist code id given code name
 activist_code_id = None
 for code in van.get_activist_codes():
-    if code["name"] == ACTIVIST_CODE_NAME:
-        activist_code_id = code["activistCodeId"]
+    if code['name'] == ACTIVIST_CODE_NAME:
+        activist_code_id = code['activistCodeId']

 for participant in filtered_participants:
     # generates list of parameters from matched columns, only includes if row has data for column
     params = {col: participant[col] for col in column_map.keys() if participant[col]}
-    van_person = van.upsert_person(
-        **params
-    )  # updates if it finds a match, or inserts new user
+    van_person = van.upsert_person(**params) # updates if it finds a match, or inserts new user
     if activist_code_id:
-        van.apply_activist_code(
-            id=van_person["vanId"], activist_code_id=activist_code_id, id_type="vanid"
-        )
+        van.apply_activist_code(id=van_person['vanId'], activist_code_id=activist_code_id,
+                                id_type="vanid")
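For readers who want to see the zoom_to_van.py coalescing step in isolation, here is a small self-contained sketch. The sample row and its values are invented for illustration; map_and_coalesce_columns is the same Parsons Table method the script calls above.

from parsons import Table

# Invented example row: column names vary the way ad-hoc exports often do.
tbl = Table([{"fn": "Ada", "last name": "Lovelace", "zip5": "02139"}])

column_map = {
    "first_name": ["fn", "first", "firstname", "first name"],
    "last_name": ["ln", "last", "lastname", "last name"],
    "zip": ["zip5", "zipcode", "zip code"],
}

# Folds each variant column into its canonical name, so the columns end up
# named the way van.upsert_person() expects them.
tbl.map_and_coalesce_columns(column_map)
print(tbl.columns)  # should now list first_name, last_name, and zip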