Skip to content

Commit

Permalink
Revert "Start incorporating type checker to GUI file (#47)"
Browse files Browse the repository at this point in the history
This reverts commit 92a66ff.
  • Loading branch information
Akuli committed Mar 27, 2021
1 parent 92a66ff commit 1b16b03
Show file tree
Hide file tree
Showing 3 changed files with 59 additions and 139 deletions.
24 changes: 0 additions & 24 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,26 +1,2 @@
# Upwatch
An Upwork webscraper that will notify you of newly published job posts in your field of work

# PyQt5

# MyPy
python3 -m pip install mypy

mypy *.py


# PyQt5 Stub files for MyPy
python3 -m pip install PyQt5-stubs


# black
python3 -m pip install black

black *.py



#LOGIC
#BeautifulSoup
#Requests
#LXML
54 changes: 16 additions & 38 deletions upwatch.py
Original file line number Diff line number Diff line change
@@ -1,41 +1,19 @@
from bs4 import BeautifulSoup # type: ignore
from typing import TypedDict
import requests
from bs4 import BeautifulSoup # type: ignore
import json
import time
import pathlib
from typing import Optional, List, Tuple
from typing import Any, List

# !import re # For looking for eventual word counts in job posts & controlling the validity of url input.
# TODO: use TypedDict for these
JsonContent = Any
JobPost = Any

JobPost = TypedDict(
"JobPost",
{
"Job Title": str,
"Payment Type": str,
"Budget": str,
"Job Description": str,
"Job Post URL": str,
},
)

JsonContent = TypedDict(
"JsonContent",
{
"Requests URL": str,
"Run on startup": bool,
"Scrape interval": int,
"DBMR": bool,
"Fixed Lowest Rate": int,
"Hourly Lowest Rate": int,
"Ignore no budget": bool,
"Job Posts": Optional[List[JobPost]],
},
)
# !import re # For looking for eventual word counts in job posts & controlling the validity of url input.


# TODO: Add to json: user agent
def read_from_json(json_path: pathlib.Path) -> Tuple[JsonContent, bool]:
def read_from_json(json_path: pathlib.Path) -> JsonContent:
""" Reads all the job posts from job_posts.json """
try:
with open(json_path / "job_posts.json", "r") as job_posts_json:
Expand Down Expand Up @@ -146,14 +124,14 @@ def job_post_scraper(json_content: JsonContent) -> List[JobPost]:
) # TODO: Figure out how to fetch User Agent on current system.
response.raise_for_status()
break
# except requests.exceptions.HTTPError as errh: # TODO Error messages need to be communicated to user in a different way.
# print("HTTP Error:", errh)
# print("Please try a different URL")
# return
# except requests.exceptions.ConnectionError:
# print("Error Connecting")
#     print("Please check your internet connection and try again.")
# return
# except requests.exceptions.HTTPError as errh: # TODO Error messages need to be communicated to user in a different way.
# print("HTTP Error:", errh)
# print("Please try a different URL")
# return
# except requests.exceptions.ConnectionError:
# print("Error Connecting")
# print("Please check you internet connection and try again.")
# return
except requests.exceptions.Timeout:
print("Your request timed out.")
if connection_attempts == 3:
Expand Down Expand Up @@ -185,7 +163,7 @@ def job_post_scraper(json_content: JsonContent) -> List[JobPost]:

job_post_url = job_post.find("a", class_="job-title-link").attrs["href"]

job_post_dict: JobPost = {
job_post_dict = {
"Job Title": job_title,
"Payment Type": job_payment_type,
"Budget": job_budget,
Expand Down
Loading

0 comments on commit 1b16b03

Please sign in to comment.