Enabling tests for python 3, fixing all python 3 incompatibilities.
Jon Wayne Parrott committed Sep 2, 2015
1 parent 25d0540 commit 7957dea
Showing 15 changed files with 123 additions and 199 deletions.
11 changes: 11 additions & 0 deletions README.md
@@ -13,6 +13,9 @@ For more detailed introduction to a product, check the README in the corresponding

 ## Testing
 
+The tests in this repository run against live services, therefore, it takes a bit
+of configuration to run all of the tests locally.
+
 ### Local setup
 
 Before you can run tests locally you must have:
@@ -26,7 +29,9 @@ Before you can run tests locally you must have:
     $ curl https://sdk.cloud.google.com | bash
 
 * Most tests require you to have an active, billing-enabled project on the [Google Developers Console](https://console.developers.google.com).
+
 * You will need a set of [Service Account Credentials](https://console.developers.google.com/project/_/apiui/credential) for your project in ``json`` form.
+
 * Set the environment variables appropriately for your project.
 
     $ export GOOGLE_APPLICATION_CREDENTIALS=your-service-account-json-file
@@ -43,6 +48,12 @@ If you want to run the Google App Engine tests, you will need:

     $ export GAE_PYTHONPATH=~/google-cloud-sdk/platform/google_appengine
 
+To run the bigquery tests, you'll need to create a bigquery dataset:
+
+* Create a dataset in your project named `test_dataset`.
+* Create a table named `test_table2`, give it the following schema:
+
+
 ### Test environments
 
 We use [tox](https://tox.readthedocs.org/en/latest/) to configure multiple python environments:
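
To script that dataset setup rather than clicking through the console, something
like the following could work. This is a sketch against the BigQuery v2 API using
the same client stack as the samples; the schema shown is illustrative only, since
the README's actual schema is not visible in this diff.

    import httplib2
    from apiclient import discovery
    from oauth2client.client import GoogleCredentials

    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())
    bigquery = discovery.build('bigquery', 'v2', http=http)

    project_id = 'your-project-id'  # placeholder
    bigquery.datasets().insert(
        projectId=project_id,
        body={'datasetReference': {'datasetId': 'test_dataset'}}).execute()
    bigquery.tables().insert(
        projectId=project_id,
        datasetId='test_dataset',
        body={
            'tableReference': {
                'projectId': project_id,
                'datasetId': 'test_dataset',
                'tableId': 'test_table2',
            },
            # Illustrative schema -- substitute the one the README specifies.
            'schema': {'fields': [{'name': 'Name', 'type': 'STRING'}]},
        }).execute()
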
14 changes: 7 additions & 7 deletions bigquery/samples/async_query.py
@@ -11,11 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function # For python 2/3 interoperability
-
 import json
 import uuid
 
+from six.moves import input
+
 from bigquery.samples.utils import get_service
 from bigquery.samples.utils import paging
 from bigquery.samples.utils import poll_job
@@ -70,13 +70,13 @@ def run(project_id, query_string, batch, num_retries, interval):

 # [START main]
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    query_string = raw_input("Enter the Bigquery SQL Query: ")
-    batch = raw_input("Run query as batch (y/n)?: ") in (
+    project_id = input("Enter the project ID: ")
+    query_string = input("Enter the Bigquery SQL Query: ")
+    batch = input("Run query as batch (y/n)?: ") in (
         'True', 'true', 'y', 'Y', 'yes', 'Yes')
-    num_retries = int(raw_input(
+    num_retries = int(input(
         "Enter number of times to retry in case of 500 error: "))
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the query for completion (seconds): ")
 
     for result in run(project_id, query_string, batch, num_retries, interval):
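
The `from six.moves import input` line added above is what keeps these prompts
working on both interpreters: on Python 2 it resolves to `raw_input`, on Python 3
to the builtin `input`. Roughly, six provides the equivalent of this sketch:

    import sys

    if sys.version_info[0] == 2:
        input = raw_input  # noqa: F821 -- Python 2's safe line-reading builtin
    # On Python 3 the builtin input() already returns the typed line as str,
    # without evaluating it the way Python 2's input() did.
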
16 changes: 9 additions & 7 deletions bigquery/samples/export_data_to_cloud_storage.py
@@ -13,6 +13,8 @@
 #
 import uuid
 
+from six.moves import input
+
 from bigquery.samples.utils import get_service
 from bigquery.samples.utils import poll_job
 
@@ -82,19 +84,19 @@ def run(cloud_storage_path,

 # [START main]
 def main():
-    projectId = raw_input("Enter the project ID: ")
-    datasetId = raw_input("Enter a dataset ID: ")
-    tableId = raw_input("Enter a table name to copy: ")
-    cloud_storage_path = raw_input(
+    projectId = input("Enter the project ID: ")
+    datasetId = input("Enter a dataset ID: ")
+    tableId = input("Enter a table name to copy: ")
+    cloud_storage_path = input(
         "Enter a Google Cloud Storage URI: ")
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the job (in seconds): ")
-    num_retries = raw_input(
+    num_retries = input(
         "Enter the number of retries in case of 500 error: ")
 
     run(cloud_storage_path,
         projectId, datasetId, tableId,
         num_retries, interval)
 
-    print 'Done exporting!'
+    print('Done exporting!')
 # [END main]
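
A side note on the `print('Done exporting!')` change: a single parenthesized
argument also parses fine on Python 2 without `from __future__ import
print_function`, because it is just a parenthesized expression. The future
import only changes behavior for multi-argument prints:

    from __future__ import print_function  # makes print a function on Python 2

    print('Done exporting!')     # identical output on Python 2 and 3
    print('Done', 'exporting!')  # without the future import, Python 2 would
                                 # print the tuple ('Done', 'exporting!')
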
7 changes: 3 additions & 4 deletions bigquery/samples/list_datasets_projects.py
@@ -31,18 +31,17 @@
 where <project-id> is the id of the developers console [3] project you'd like
 to list the bigquery datasets and projects for.
-[1] https://developers.google.com/identity/protocols/application-default-credentials#howtheywork
+[1] https://developers.google.com/identity/protocols/\
+application-default-credentials#howtheywork
 [2] https://cloud.google.com/sdk/
 [3] https://console.developers.google.com
 """ # NOQA
 
 import argparse
 from pprint import pprint
 
-from urllib2 import HTTPError
-
 from apiclient import discovery
-
+from six.moves.urllib.error import HTTPError
 from oauth2client.client import GoogleCredentials
 
 
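
`six.moves.urllib.error.HTTPError` resolves to `urllib2.HTTPError` on Python 2
and `urllib.error.HTTPError` on Python 3, so the exception handling stays
identical across interpreters. A minimal sketch of the pattern (hypothetical URL):

    from six.moves.urllib.error import HTTPError
    from six.moves.urllib.request import urlopen

    try:
        urlopen('https://www.googleapis.com/bigquery/v2/nonexistent')
    except HTTPError as err:
        print('HTTP error {}'.format(err.code))
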
16 changes: 8 additions & 8 deletions bigquery/samples/load_data_by_post.py
@@ -13,12 +13,12 @@
 #
 import json
 
-from bigquery.samples.utils import get_service, poll_job
-
 import httplib2
 
+from six.moves import input
 from oauth2client.client import GoogleCredentials
 
+from bigquery.samples.utils import get_service, poll_job
 
 
 # [START make_post]
 def make_post(http, schema, data, projectId, datasetId, tableId):
@@ -75,16 +75,16 @@ def make_post(http, schema, data, projectId, datasetId, tableId):
 def main():
     credentials = GoogleCredentials.get_application_default()
     http = credentials.authorize(httplib2.Http())
-    projectId = raw_input('Enter the project ID: ')
-    datasetId = raw_input('Enter a dataset ID: ')
-    tableId = raw_input('Enter a table name to load the data to: ')
-    schema_path = raw_input(
+    projectId = input('Enter the project ID: ')
+    datasetId = input('Enter a dataset ID: ')
+    tableId = input('Enter a table name to load the data to: ')
+    schema_path = input(
         'Enter the path to the schema file for the table: ')
 
     with open(schema_path, 'r') as schema_file:
         schema = schema_file.read()
 
-    data_path = raw_input('Enter the path to the data file: ')
+    data_path = input('Enter the path to the data file: ')
 
     with open(data_path, 'r') as data_file:
         data = data_file.read()
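
For context, the `make_post` helper this file defines issues a raw multipart POST
against the BigQuery upload endpoint using the authorized `httplib2` client. In
outline it does something like the following sketch (boundary and body values
hypothetical; the real helper assembles the job configuration and data parts):

    import httplib2
    from oauth2client.client import GoogleCredentials

    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())

    url = ('https://www.googleapis.com/upload/bigquery/v2/projects/'
           'your-project-id/jobs')
    headers = {'Content-Type': 'multipart/related; boundary=xxx'}
    body = '--xxx\n...\n--xxx--'  # job config part, then the data part
    resp, content = http.request(url, method='POST', body=body, headers=headers)
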
16 changes: 9 additions & 7 deletions bigquery/samples/load_data_from_csv.py
@@ -14,6 +14,8 @@
 import json
 import uuid
 
+from six.moves import input
+
 from bigquery.samples.utils import get_service, poll_job
 
 
@@ -81,20 +83,20 @@ def run(source_schema, source_csv,

 # [START main]
 def main():
-    projectId = raw_input("Enter the project ID: ")
-    datasetId = raw_input("Enter a dataset ID: ")
-    tableId = raw_input("Enter a destination table name: ")
+    projectId = input("Enter the project ID: ")
+    datasetId = input("Enter a dataset ID: ")
+    tableId = input("Enter a destination table name: ")
 
-    schema_file_path = raw_input(
+    schema_file_path = input(
         "Enter the path to the table schema: ")
     with open(schema_file_path, 'r') as schema_file:
         schema = json.load(schema_file)
 
-    data_file_path = raw_input(
+    data_file_path = input(
         "Enter the Cloud Storage path for the CSV file: ")
-    num_retries = raw_input(
+    num_retries = input(
         "Enter number of times to retry in case of 500 error: ")
-    interval = raw_input(
+    interval = input(
         "Enter how often to poll the query for completion (seconds): ")
     run(schema,
         data_file_path,
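
Note that `input`, like `raw_input` before it, always returns a string, and here
`num_retries` and `interval` are passed to `run` uncast. Callers that need real
numbers have to convert explicitly, as `async_query.py` does:

    from six.moves import input

    # input() returns str on both interpreters; numeric prompts need a cast:
    num_retries = int(input(
        "Enter number of times to retry in case of 500 error: "))
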
18 changes: 9 additions & 9 deletions bigquery/samples/streaming.py
@@ -11,12 +11,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function
-
 import ast
 import json
 import uuid
 
+from six.moves import input
+
 from bigquery.samples.utils import get_service
 
 
@@ -57,18 +57,18 @@ def run(project_id, dataset_id, table_id, rows, num_retries):

 # [START main]
 def get_rows():
-    line = raw_input("Enter a row (python dict) into the table: ")
+    line = input("Enter a row (python dict) into the table: ")
     while line:
         yield ast.literal_eval(line)
-        line = raw_input("Enter another row into the table \n" +
-                         "[hit enter to stop]: ")
+        line = input("Enter another row into the table \n" +
+                     "[hit enter to stop]: ")
 
 
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    dataset_id = raw_input("Enter a dataset ID: ")
-    table_id = raw_input("Enter a table ID : ")
-    num_retries = int(raw_input(
+    project_id = input("Enter the project ID: ")
+    dataset_id = input("Enter a dataset ID: ")
+    table_id = input("Enter a table ID : ")
+    num_retries = int(input(
         "Enter number of times to retry in case of 500 error: "))
 
     for result in run(project_id, dataset_id, table_id,
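
`get_rows` parses each typed line with `ast.literal_eval`, which accepts only
Python literal syntax (strings, numbers, tuples, lists, dicts, booleans, None).
Unlike `eval`, it cannot execute arbitrary code:

    import ast

    row = ast.literal_eval("{'Name': 'Ada', 'Age': 36}")  # -> a real dict
    ast.literal_eval("__import__('os')")  # raises ValueError; nothing is run
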
12 changes: 6 additions & 6 deletions bigquery/samples/sync_query.py
@@ -11,10 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from __future__ import print_function # For python 2/3 interoperability
-
 import json
 
+from six.moves import input
+
 from bigquery.samples.utils import get_service, paging
 
 

@@ -49,12 +49,12 @@ def run(project_id, query, timeout, num_retries):

 # [START main]
 def main():
-    project_id = raw_input("Enter the project ID: ")
-    query_string = raw_input("Enter the Bigquery SQL Query: ")
-    timeout = raw_input(
+    project_id = input("Enter the project ID: ")
+    query_string = input("Enter the Bigquery SQL Query: ")
+    timeout = input(
         "Enter how long to wait for the query to complete in milliseconds"
         "\n (if longer than 10 seconds, use an asynchronous query): ")
-    num_retries = int(raw_input(
+    num_retries = int(input(
         "Enter how many times to retry in case of server error"))
 
     for result in run(project_id, query_string, timeout, num_retries):
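
For reference, the synchronous path boils down to a single `jobs().query` call
with a `timeoutMs` setting, which is why the prompt warns against waiting on
long-running queries. A sketch against the BigQuery v2 surface (`service` here
stands in for the samples' `get_service()` result):

    body = {'query': 'SELECT 17', 'timeoutMs': 10000}
    response = service.jobs().query(
        projectId='your-project-id', body=body).execute()
    rows = response.get('rows', [])
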
4 changes: 3 additions & 1 deletion bigquery/tests/test_async_query.py
@@ -37,7 +37,9 @@ def test_async_query_runner(self):
         test_project_id = os.environ.get(tests.PROJECT_ID_ENV)
         answers = [test_project_id, self.constants['query'], 'n',
                    '1', '1']
-        with tests.mock_raw_input(answers):
+
+        with tests.mock_input_answers(
+                answers, target='bigquery.samples.async_query.input'):
             main()
 
 
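
The `tests.mock_input_answers` helper itself is not shown in this diff; a
plausible sketch of such a context manager (hypothetical, for illustration)
would patch the module-level `input` and feed back canned answers:

    import contextlib

    import mock  # the standalone mock package of the era


    @contextlib.contextmanager
    def mock_input_answers(answers, target):
        # Each call to the patched input() returns the next canned answer.
        with mock.patch(target, side_effect=list(answers)):
            yield
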
9 changes: 7 additions & 2 deletions compute/autoscaler/demo/frontend.py
@@ -22,10 +22,15 @@
     autoscaler/demo/tests/test_frontend.py
 """
 
-import BaseHTTPServer
+try:
+    import BaseHTTPServer
+    import SocketServer
+except:
+    import http.server as BaseHTTPServer
+    import socketserver as SocketServer
+
 from multiprocessing import Process
 import os
-import SocketServer
 import sys
 import time
 
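
The bare `except:` above works, but it also swallows unrelated errors. Two
tighter alternatives (sketches, not part of this commit): catch `ImportError`
explicitly, or lean on `six.moves` as the bigquery samples now do:

    # Alternative 1: catch only the failure mode that matters.
    try:
        import BaseHTTPServer
        import SocketServer
    except ImportError:
        import http.server as BaseHTTPServer
        import socketserver as SocketServer

    # Alternative 2: let six resolve the renamed stdlib modules.
    from six.moves import BaseHTTPServer
    from six.moves import socketserver as SocketServer
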
15 changes: 8 additions & 7 deletions monitoring/samples/auth.py
@@ -35,7 +35,8 @@
     $ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/json-key.json
-[1] https://developers.google.com/identity/protocols/application-default-credentials
+[1] https://developers.google.com/identity/protocols/\
+application-default-credentials
 [2] https://console.developers.google.com/project/_/apiui/credential
 """ # NOQA
 
@@ -62,19 +63,19 @@ def ListTimeseries(project_name, service):

     timeseries = service.timeseries()
 
-    print 'Timeseries.list raw response:'
+    print('Timeseries.list raw response:')
     try:
         response = timeseries.list(
             project=project_name, metric=METRIC, youngest=YOUNGEST).execute()
 
-        print json.dumps(response,
+        print(json.dumps(response,
                          sort_keys=True,
                          indent=4,
-                         separators=(',', ': '))
+                         separators=(',', ': ')))
     except:
-        print 'Error:'
+        print('Error:')
         for error in sys.exc_info():
-            print error
+            print(error)
 
 
 def main(project_name):
@@ -87,7 +88,7 @@ def main(project_name):

 if __name__ == '__main__':
     if len(sys.argv) != 2:
-        print "Usage: %s <project-name>" % sys.argv[0]
+        print("Usage: {} <project-name>".format(sys.argv[0]))
         sys.exit(1)
     main(sys.argv[1])
# [END all]
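
As with the frontend change, the bare `except:` here catches everything,
including KeyboardInterrupt. Since the interesting failure is an API error, a
narrower variant could look like this sketch (reusing the sample's names and
assuming the client library's errors module):

    from apiclient.errors import HttpError

    try:
        response = timeseries.list(
            project=project_name, metric=METRIC, youngest=YOUNGEST).execute()
        print(json.dumps(response, sort_keys=True, indent=4,
                         separators=(',', ': ')))
    except HttpError as err:
        print('Error: {}'.format(err))
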
12 changes: 6 additions & 6 deletions storage/compose_objects.py
@@ -73,8 +73,8 @@ def main(argv):
             name=filename,
             bucket=args.bucket)
         resp = req.execute()
-        print '> Uploaded source file %s' % filename
-        print json.dumps(resp, indent=2)
+        print('> Uploaded source file {}'.format(filename))
+        print(json.dumps(resp, indent=2))
 
     # Construct a request to compose the source files into the destination.
     compose_req_body = {
@@ -88,17 +88,17 @@ def main(argv):
         destinationObject=args.destination,
         body=compose_req_body)
     resp = req.execute()
-    print '> Composed files into %s' % args.destination
-    print json.dumps(resp, indent=2)
+    print('> Composed files into {}'.format(args.destination))
+    print(json.dumps(resp, indent=2))
 
     # Download and print the composed object.
     req = service.objects().get_media(
         bucket=args.bucket,
         object=args.destination)
 
     res = req.execute()
-    print '> Composed file contents:'
-    print res
+    print('> Composed file contents:')
+    print(res)
 
 
 if __name__ == '__main__':
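
The `compose_req_body` dict is truncated in this view. Per the Cloud Storage
JSON API, an objects.compose request body lists the source objects and a
destination resource, roughly (object names hypothetical):

    compose_req_body = {
        'sourceObjects': [
            {'name': 'part-one.txt'},
            {'name': 'part-two.txt'},
        ],
        'destination': {
            'contentType': 'text/plain',
        },
    }
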
4 changes: 2 additions & 2 deletions storage/list_objects.py
@@ -64,7 +64,7 @@ def main(argv):
     # specified bucket.
     req = service.buckets().get(bucket=args.bucket)
     resp = req.execute()
-    print json.dumps(resp, indent=2)
+    print(json.dumps(resp, indent=2))
     # [END list_bucket]
 
     # Create a request to objects.list to retrieve a list of objects.
@@ -76,7 +76,7 @@ def main(argv):
     # automatically handle paging with the pageToken.
     while req is not None:
         resp = req.execute()
-        print json.dumps(resp, indent=2)
+        print(json.dumps(resp, indent=2))
         req = service.objects().list_next(req, resp)
 
 if __name__ == '__main__':
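
The `list`/`list_next` pairing shown here is the client library's generic paging
idiom and carries over to other collections unchanged. For example, accumulating
object names instead of printing raw JSON (a sketch reusing the sample's
`service` and `args`):

    names = []
    req = service.objects().list(bucket=args.bucket)
    while req is not None:
        resp = req.execute()
        names.extend(item['name'] for item in resp.get('items', []))
        req = service.objects().list_next(req, resp)
    print('\n'.join(names))
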