Skip to content

Commit

Permalink
adding blank starter files to finished files
Browse files Browse the repository at this point in the history
  • Loading branch information
Rick Barraza committed Dec 5, 2017
1 parent adcdd39 commit fe4fb73
Show file tree
Hide file tree
Showing 18 changed files with 4,919 additions and 4,212 deletions.
737 changes: 3 additions & 734 deletions 01 An Introduction to Jupyter Notebooks.ipynb

Large diffs are not rendered by default.

259 changes: 0 additions & 259 deletions 02 Calling Cognitive Services.ipynb
Original file line number Diff line number Diff line change
@@ -1,44 +1,5 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import Helpers\n",
"\n",
"# INSTANTIATE THE CLASS -- CALLING test() ON THE CLASS OBJECT ITSELF RAISES\n",
"# 'TypeError: test() missing 1 required positional argument: self'\n",
"helper = Helpers.Exquisite_Corpse()\n",
"\n",
"print(helper.test())"
]
},
{
"cell_type": "markdown",
"metadata": {},
Expand All @@ -56,226 +17,6 @@
"\n",
"Let's give the service a URL to an image we find online and ask it to give us a description of it."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# WE EXPLORED PIL IN OUR FIRST WORKBOOK\n",
"from PIL import Image\n",
"\n",
"# 'NUMPY' BRINGS US NUMBER-PYTHON GOODIES TO DO SOME CRAZY MATH FOR US (SO WE DON'T HAVE TO)\n",
"import numpy as np\n",
"# WE NEED 'REQUESTS' SINCE WE ARE GOING TO BE CALLING, OR MAKING REQUESTS, TO THE COGNITIVE SERVICES WEBSITE\n",
"import requests\n",
"# WE WILL BE USING JSON AS OUR DATA FORMAT, SO LETS MAKE SURE WE CAN EASILY PARSE THE ANSWERS THEY GIVE US\n",
"from json import JSONEncoder\n",
"# WE SHOULD LOAD INTERNET IMAGES AS MACHINE READABLE PIXEL DATA, OR BYTES, AND NOT JUST TREAT IT AS TEXT (THE DEFAULT)\n",
"from io import BytesIO\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"imageURL = \"https://pbs.twimg.com/media/DNV_ZH4X0AAZIPv.jpg\"\n",
"\n",
"# DOWNLOAD THE IMAGE -- FAIL FAST WITH A CLEAR HTTPError IF THE DOWNLOAD FAILED,\n",
"# INSTEAD OF HANDING BROKEN BYTES TO Image.open BELOW\n",
"imageResponse = requests.get(imageURL)\n",
"imageResponse.raise_for_status()\n",
"image = Image.open(BytesIO(imageResponse.content))\n",
"image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"serviceURL = 'https://westus.api.cognitive.microsoft.com/vision/v1.0/analyze'\n",
"# NEVER COMMIT A REAL KEY -- LOAD IT FROM AN ENVIRONMENT VARIABLE OR getpass() INSTEAD\n",
"mySuperSecretKey = '..REPLACE THIS WITH YOUR OWN KEY FROM COGNITIVE SERVICES..'\n",
"\n",
"# SETUP OUR REQUEST HEADER\n",
"headers = dict()\n",
"headers['Ocp-Apim-Subscription-Key'] = mySuperSecretKey\n",
"params = { 'visualFeatures' : 'Color, Description, Tags'}\n",
"\n",
"def GetDescriptionFromURL():\n",
"    # ASK THE SERVICE TO ANALYZE THE IMAGE AT imageURL; RETURNS THE PARSED JSON RESULT.\n",
"    # ('payload' AVOIDS SHADOWING THE STANDARD-LIBRARY NAME 'json'.)\n",
"    payload = { 'url': imageURL }\n",
"    headers['Content-Type'] = 'application/json'\n",
"    response = requests.request( 'post', serviceURL, json = payload, data = None, headers = headers, params = params)\n",
"    # RAISE ON HTTP ERRORS -- THE OLD CODE RETURNED A STRING HERE, WHICH MADE\n",
"    # result['description'] BELOW CRASH WITH A CONFUSING, UNRELATED TypeError\n",
"    response.raise_for_status()\n",
"    return response.json()\n",
"\n",
"\n",
"result = GetDescriptionFromURL()\n",
"print(\"DESCRIPTION: \", result['description']['captions'][0])\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We can see all the data that was returned as JSON in the result if we print out the whole thing:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"result"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Finally, let's try a local file.\n",
"\n",
"The solution is almost the same, but first let's load a local image and save a copy of all its pixel data or bytes.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# COPY THE LOCAL IMAGE'S ENCODED BYTES INTO AN IN-MEMORY BUFFER WE CAN UPLOAD\n",
"sendBytes = BytesIO()\n",
"\n",
"localImage = Image.open('images/martha.png')\n",
"localImage.save(sendBytes, \"PNG\")\n",
"# REWIND THE BUFFER SO THE UPLOAD READS FROM THE FIRST BYTE, NOT THE END\n",
"sendBytes.seek(0)\n",
"\n",
"localImage"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"And now we tweak our call to the server to send it the byte data of the local image, and tweak the header to let the service know we are sending a file stream and not just a URL."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"def DescriptionFromLocalFile(file_data):\n",
"    # SAME CALL AS GetDescriptionFromURL, BUT WE STREAM RAW BYTES AND FLAG\n",
"    # THAT IN THE Content-Type HEADER INSTEAD OF SENDING A URL\n",
"    headers['Content-Type'] = 'application/octet-stream'\n",
"    response = requests.request('post', serviceURL, json = None, data = file_data, headers = headers, params = params)\n",
"    # RAISE ON HTTP ERRORS -- RETURNING A STRING HERE USED TO MAKE THE\n",
"    # result['description'] LOOKUP BELOW CRASH WITH AN UNRELATED TypeError\n",
"    response.raise_for_status()\n",
"    return response.json()\n",
"\n",
"\n",
"result = DescriptionFromLocalFile(sendBytes)\n",
"print(\"DESCRIPTION: \", result['description']['captions'][0]) \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# FACE ANALYSIS"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# USE %pip (NOT !pip) SO THE PACKAGE IS INSTALLED INTO THIS KERNEL'S ENVIRONMENT\n",
"%pip install cognitive_face"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cognitive_face as CF\n",
"\n",
"# CONFIGURE THE FACE SDK. NEVER COMMIT A REAL KEY -- LOAD IT FROM AN\n",
"# ENVIRONMENT VARIABLE OR getpass() BEFORE SHARING THIS NOTEBOOK.\n",
"CF.Key.set('..REPLACE THIS WITH YOUR OWN KEY FROM COGNITIVE SERVICES..')\n",
"CF.BaseUrl.set('https://westus.api.cognitive.microsoft.com/face/v1.0/')\n",
"\n",
"# ECHO THE IMAGE URL WE'LL BE ANALYZING\n",
"imageURL"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"image"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# DETECT FACES, REQUESTING ONLY THE AGE AND GENDER ATTRIBUTES.\n",
"# KEYWORD ARGUMENTS MAKE THE TWO BARE BOOLEANS READABLE.\n",
"faces = CF.face.detect(imageURL, face_id=False, landmarks=False, attributes='age,gender')\n",
"print(faces)"
]
},
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i in faces:\n",
" print(i['faceRectangle'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from PIL import ImageDraw\n",
"from PIL import ImageFont\n",
"\n",
"# RE-DOWNLOAD THE IMAGE SO WE DRAW ON A FRESH, UNMODIFIED COPY\n",
"imageResponse = requests.get(imageURL)\n",
"image = Image.open(BytesIO(imageResponse.content))\n",
"\n",
"# ON WINDOWS, YOU CAN DO THIS... (ON OTHER PLATFORMS, POINT AT ANY .ttf FILE YOU HAVE)\n",
"font = ImageFont.truetype(\"Roboto-Thin.ttf\", 18)\n",
"\n",
"draw = ImageDraw.Draw(image)\n",
"\n",
"for f in faces:\n",
"    # faceRectangle GIVES left/top/width/height; CONVERT TO CORNER COORDINATES\n",
"    upperX = f['faceRectangle']['left']\n",
"    upperY = f['faceRectangle']['top']\n",
"    lowerX = f['faceRectangle']['left'] + f['faceRectangle']['width']\n",
"    lowerY = f['faceRectangle']['top'] + f['faceRectangle']['height']\n",
"    draw.rectangle(((upperX, upperY),(lowerX, lowerY)), \n",
"                   fill=None, outline=\"yellow\")\n",
"    \n",
"    # FILLED LABEL BOX UNDER THE FACE, SHOWING GENDER AND AGE\n",
"    draw.rectangle((( upperX, lowerY ), (lowerX, lowerY + 56)), fill=\"yellow\")\n",
"    draw.text( (upperX + 5, lowerY), f['faceAttributes']['gender'], (0,0,0), font=font)\n",
"    draw.text( (upperX + 5, lowerY + 22), \"age:\" + str(f['faceAttributes']['age']), (0,0,0), font=font)\n",
"    \n",
"image"
]
}
],
"metadata": {
Expand Down
Loading

0 comments on commit fe4fb73

Please sign in to comment.