From 3de90ab2258ca194a045eb75c7ad70f42d3dd080 Mon Sep 17 00:00:00 2001 From: nonotest Date: Sun, 25 Nov 2018 20:47:06 +0800 Subject: [PATCH] initial --- .env.example | 13 + .gitignore | 4 + Gopkg.lock | 133 + Gopkg.toml | 58 + README.md | 69 + cmd/export.go | 32 + cmd/import.go | 55 + datastore/datastore.go | 18 + datastore/redis.go | 132 + docker-compose.yml | 9 + export-insta.go | 32 + export/exporter.go | 25 + export/sheet.go | 242 + ig-cookie | 1 + main.go | 26 + models/data-set.go | 29 + models/flags.go | 12 + models/user.go | 12 + sheet/client.go | 85 + source/facebook.go | 74 + source/instagram.go | 111 + source/linkedin.go | 1 + source/mock.go | 47 + source/source.go | 39 + source/twitter.go | 115 + vendor/cloud.google.com/go/AUTHORS | 15 + vendor/cloud.google.com/go/CONTRIBUTORS | 40 + vendor/cloud.google.com/go/LICENSE | 202 + .../go/compute/metadata/metadata.go | 501 + .../ChimeraCoder/anaconda/.appveyor.yml | 8 + .../ChimeraCoder/anaconda/.gitignore | 6 + .../ChimeraCoder/anaconda/.travis.yml | 36 + .../github.com/ChimeraCoder/anaconda/COPYING | 1 + .../ChimeraCoder/anaconda/Gopkg.lock | 39 + .../ChimeraCoder/anaconda/Gopkg.toml | 38 + .../github.com/ChimeraCoder/anaconda/LICENSE | 7 + .../github.com/ChimeraCoder/anaconda/README | 110 + .../ChimeraCoder/anaconda/README.md | 1 + .../ChimeraCoder/anaconda/account.go | 22 + .../ChimeraCoder/anaconda/backoff.go | 45 + .../ChimeraCoder/anaconda/blocks.go | 54 + .../ChimeraCoder/anaconda/configuration.go | 32 + .../ChimeraCoder/anaconda/directmessage.go | 15 + .../ChimeraCoder/anaconda/directmessages.go | 57 + .../ChimeraCoder/anaconda/errors.go | 114 + .../ChimeraCoder/anaconda/favorites.go | 11 + .../anaconda/friends_followers.go | 289 + .../ChimeraCoder/anaconda/geosearch.go | 57 + .../github.com/ChimeraCoder/anaconda/list.go | 26 + .../github.com/ChimeraCoder/anaconda/lists.go | 87 + .../github.com/ChimeraCoder/anaconda/log.go | 91 + .../github.com/ChimeraCoder/anaconda/media.go | 89 + .../github.com/ChimeraCoder/anaconda/mutes.go | 54 + .../ChimeraCoder/anaconda/oembed.go | 59 + .../github.com/ChimeraCoder/anaconda/place.go | 35 + .../anaconda/rate_limit_status.go | 30 + .../ChimeraCoder/anaconda/relationship.go | 40 + .../ChimeraCoder/anaconda/search.go | 57 + .../ChimeraCoder/anaconda/streaming.go | 318 + .../ChimeraCoder/anaconda/timeline.go | 45 + .../ChimeraCoder/anaconda/trends.go | 64 + .../github.com/ChimeraCoder/anaconda/tweet.go | 154 + .../ChimeraCoder/anaconda/tweets.go | 107 + .../ChimeraCoder/anaconda/twitter.go | 370 + .../ChimeraCoder/anaconda/twitter_entities.go | 74 + .../ChimeraCoder/anaconda/twitter_user.go | 53 + .../github.com/ChimeraCoder/anaconda/users.go | 89 + .../ChimeraCoder/anaconda/webhook.go | 78 + .../ChimeraCoder/tokenbucket/.gitignore | 4 + .../ChimeraCoder/tokenbucket/COPYING | 165 + .../ChimeraCoder/tokenbucket/LICENSE | 1 + .../ChimeraCoder/tokenbucket/README | 48 + .../ChimeraCoder/tokenbucket/README.md | 1 + .../ChimeraCoder/tokenbucket/tokenbucket.go | 86 + vendor/github.com/azr/backoff/.gitignore | 22 + vendor/github.com/azr/backoff/.travis.yml | 2 + vendor/github.com/azr/backoff/LICENSE | 20 + vendor/github.com/azr/backoff/README.md | 22 + vendor/github.com/azr/backoff/backoff.go | 51 + vendor/github.com/azr/backoff/exponential.go | 112 + vendor/github.com/azr/backoff/linear.go | 44 + .../dustin/go-jsonpointer/.gitignore | 2 + 
.../github.com/dustin/go-jsonpointer/LICENSE | 19 + .../dustin/go-jsonpointer/README.markdown | 5 + .../github.com/dustin/go-jsonpointer/bytes.go | 328 + .../github.com/dustin/go-jsonpointer/doc.go | 2 + .../github.com/dustin/go-jsonpointer/map.go | 38 + .../dustin/go-jsonpointer/reflect.go | 171 + vendor/github.com/dustin/gojson/.gitignore | 2 + vendor/github.com/dustin/gojson/LICENSE | 27 + vendor/github.com/dustin/gojson/decode.go | 1089 ++ vendor/github.com/dustin/gojson/encode.go | 1183 ++ vendor/github.com/dustin/gojson/fold.go | 143 + vendor/github.com/dustin/gojson/indent.go | 137 + vendor/github.com/dustin/gojson/scanner.go | 629 + vendor/github.com/dustin/gojson/stream.go | 200 + vendor/github.com/dustin/gojson/tags.go | 44 + .../garyburd/go-oauth/oauth/oauth.go | 707 + .../garyburd/go-oauth/oauth/oauth16.go | 13 + .../garyburd/go-oauth/oauth/oauth17.go | 12 + vendor/github.com/garyburd/redigo/LICENSE | 175 + .../garyburd/redigo/internal/commandinfo.go | 54 + .../github.com/garyburd/redigo/redis/conn.go | 673 + .../github.com/garyburd/redigo/redis/doc.go | 177 + .../github.com/garyburd/redigo/redis/go16.go | 27 + .../github.com/garyburd/redigo/redis/go17.go | 29 + .../github.com/garyburd/redigo/redis/go18.go | 9 + .../github.com/garyburd/redigo/redis/log.go | 134 + .../github.com/garyburd/redigo/redis/pool.go | 527 + .../garyburd/redigo/redis/pool17.go | 35 + .../garyburd/redigo/redis/pubsub.go | 157 + .../github.com/garyburd/redigo/redis/redis.go | 117 + .../github.com/garyburd/redigo/redis/reply.go | 479 + .../github.com/garyburd/redigo/redis/scan.go | 585 + .../garyburd/redigo/redis/script.go | 91 + vendor/github.com/golang/protobuf/AUTHORS | 3 + .../github.com/golang/protobuf/CONTRIBUTORS | 3 + vendor/github.com/golang/protobuf/LICENSE | 28 + .../github.com/golang/protobuf/proto/clone.go | 253 + .../golang/protobuf/proto/decode.go | 428 + .../golang/protobuf/proto/discard.go | 350 + .../golang/protobuf/proto/encode.go | 203 + .../github.com/golang/protobuf/proto/equal.go | 300 + .../golang/protobuf/proto/extensions.go | 543 + .../github.com/golang/protobuf/proto/lib.go | 979 ++ .../golang/protobuf/proto/message_set.go | 314 + .../golang/protobuf/proto/pointer_reflect.go | 357 + .../golang/protobuf/proto/pointer_unsafe.go | 308 + .../golang/protobuf/proto/properties.go | 544 + .../golang/protobuf/proto/table_marshal.go | 2767 ++++ .../golang/protobuf/proto/table_merge.go | 654 + .../golang/protobuf/proto/table_unmarshal.go | 2051 +++ .../github.com/golang/protobuf/proto/text.go | 843 ++ .../golang/protobuf/proto/text_parser.go | 880 ++ .../github.com/huandu/facebook/CHANGELOG.md | 123 + .../huandu/facebook/CONTRIBUTING.md | 7 + vendor/github.com/huandu/facebook/LICENSE | 19 + vendor/github.com/huandu/facebook/README.md | 422 + vendor/github.com/huandu/facebook/api.go | 181 + vendor/github.com/huandu/facebook/app.go | 290 + .../huandu/facebook/batch_result.go | 52 + .../github.com/huandu/facebook/conversion.go | 102 + vendor/github.com/huandu/facebook/error.go | 25 + vendor/github.com/huandu/facebook/filedata.go | 71 + vendor/github.com/huandu/facebook/go.mod | 1 + .../huandu/facebook/paging_result.go | 146 + vendor/github.com/huandu/facebook/params.go | 
271 + vendor/github.com/huandu/facebook/result.go | 1346 ++ vendor/github.com/huandu/facebook/session.go | 660 + vendor/github.com/joho/godotenv/.gitignore | 1 + vendor/github.com/joho/godotenv/.travis.yml | 8 + vendor/github.com/joho/godotenv/LICENCE | 23 + vendor/github.com/joho/godotenv/README.md | 163 + vendor/github.com/joho/godotenv/godotenv.go | 346 + vendor/golang.org/x/net/AUTHORS | 3 + vendor/golang.org/x/net/CONTRIBUTORS | 3 + vendor/golang.org/x/net/LICENSE | 27 + vendor/golang.org/x/net/PATENTS | 22 + vendor/golang.org/x/net/context/context.go | 56 + .../x/net/context/ctxhttp/ctxhttp.go | 71 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/net/context/pre_go19.go | 109 + vendor/golang.org/x/oauth2/.travis.yml | 13 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 86 + .../golang.org/x/oauth2/google/appengine.go | 38 + .../x/oauth2/google/appengine_gen1.go | 77 + .../x/oauth2/google/appengine_gen2_flex.go | 27 + vendor/golang.org/x/oauth2/google/default.go | 155 + vendor/golang.org/x/oauth2/google/doc.go | 40 + vendor/golang.org/x/oauth2/google/google.go | 192 + vendor/golang.org/x/oauth2/google/jwt.go | 74 + vendor/golang.org/x/oauth2/google/sdk.go | 201 + .../x/oauth2/internal/client_appengine.go | 13 + vendor/golang.org/x/oauth2/internal/doc.go | 6 + vendor/golang.org/x/oauth2/internal/oauth2.go | 37 + vendor/golang.org/x/oauth2/internal/token.go | 275 + .../golang.org/x/oauth2/internal/transport.go | 33 + vendor/golang.org/x/oauth2/jws/jws.go | 182 + vendor/golang.org/x/oauth2/jwt/jwt.go | 162 + vendor/golang.org/x/oauth2/oauth2.go | 360 + vendor/golang.org/x/oauth2/token.go | 175 + vendor/golang.org/x/oauth2/transport.go | 144 + vendor/google.golang.org/api/AUTHORS | 10 + vendor/google.golang.org/api/CONTRIBUTORS | 55 + vendor/google.golang.org/api/LICENSE | 27 + .../api/gensupport/backoff.go | 46 + .../api/gensupport/buffer.go | 77 + .../google.golang.org/api/gensupport/doc.go | 10 + .../api/gensupport/header.go | 22 + .../google.golang.org/api/gensupport/json.go | 211 + .../api/gensupport/jsonfloat.go | 57 + .../google.golang.org/api/gensupport/media.go | 341 + .../api/gensupport/params.go | 50 + .../api/gensupport/resumable.go | 216 + .../google.golang.org/api/gensupport/retry.go | 84 + .../google.golang.org/api/gensupport/send.go | 87 + .../api/googleapi/googleapi.go | 415 + .../googleapi/internal/uritemplates/LICENSE | 18 + .../internal/uritemplates/uritemplates.go | 248 + .../googleapi/internal/uritemplates/utils.go | 17 + .../google.golang.org/api/googleapi/types.go | 202 + .../api/sheets/v4/sheets-api.json | 5979 ++++++++ .../api/sheets/v4/sheets-gen.go | 12516 ++++++++++++++++ .../google.golang.org/appengine/.travis.yml | 20 + .../appengine/CONTRIBUTING.md | 90 + vendor/google.golang.org/appengine/LICENSE | 202 + vendor/google.golang.org/appengine/README.md | 73 + .../google.golang.org/appengine/appengine.go | 131 + .../appengine/appengine_vm.go | 20 + vendor/google.golang.org/appengine/errors.go | 46 + vendor/google.golang.org/appengine/go.mod | 7 + vendor/google.golang.org/appengine/go.sum | 6 + .../google.golang.org/appengine/identity.go | 142 + .../appengine/internal/api.go | 668 + .../appengine/internal/api_classic.go | 
169 + .../appengine/internal/api_common.go | 123 + .../appengine/internal/app_id.go | 28 + .../app_identity/app_identity_service.pb.go | 611 + .../app_identity/app_identity_service.proto | 64 + .../appengine/internal/base/api_base.pb.go | 308 + .../appengine/internal/base/api_base.proto | 33 + .../internal/datastore/datastore_v3.pb.go | 4367 ++++++ .../internal/datastore/datastore_v3.proto | 551 + .../appengine/internal/identity.go | 49 + .../appengine/internal/identity_classic.go | 61 + .../appengine/internal/identity_flex.go | 11 + .../appengine/internal/identity_vm.go | 134 + .../appengine/internal/internal.go | 110 + .../appengine/internal/log/log_service.pb.go | 1313 ++ .../appengine/internal/log/log_service.proto | 150 + .../appengine/internal/main.go | 15 + .../appengine/internal/main_vm.go | 48 + .../appengine/internal/metadata.go | 60 + .../internal/modules/modules_service.pb.go | 786 + .../internal/modules/modules_service.proto | 80 + .../appengine/internal/net.go | 56 + .../appengine/internal/regen.sh | 40 + .../internal/remote_api/remote_api.pb.go | 361 + .../internal/remote_api/remote_api.proto | 44 + .../appengine/internal/transaction.go | 115 + .../internal/urlfetch/urlfetch_service.pb.go | 527 + .../internal/urlfetch/urlfetch_service.proto | 64 + .../google.golang.org/appengine/namespace.go | 25 + vendor/google.golang.org/appengine/timeout.go | 20 + .../appengine/travis_install.sh | 18 + .../appengine/travis_test.sh | 12 + .../appengine/urlfetch/urlfetch.go | 210 + vendor/gopkg.in/ahmdrz/goinsta.v2/.gitignore | 33 + .../ahmdrz/goinsta.v2/CONTRIBUTING.md | 11 + vendor/gopkg.in/ahmdrz/goinsta.v2/LICENSE | 21 + vendor/gopkg.in/ahmdrz/goinsta.v2/README.md | 161 + vendor/gopkg.in/ahmdrz/goinsta.v2/account.go | 393 + vendor/gopkg.in/ahmdrz/goinsta.v2/activity.go | 219 + vendor/gopkg.in/ahmdrz/goinsta.v2/comments.go | 369 + vendor/gopkg.in/ahmdrz/goinsta.v2/const.go | 117 + vendor/gopkg.in/ahmdrz/goinsta.v2/doc.go | 17 + .../gopkg.in/ahmdrz/goinsta.v2/generator.go | 62 + vendor/gopkg.in/ahmdrz/goinsta.v2/go.mod | 1 + vendor/gopkg.in/ahmdrz/goinsta.v2/goinsta.go | 451 + .../ahmdrz/goinsta.v2/goinsta/LICENSE | 21 + vendor/gopkg.in/ahmdrz/goinsta.v2/hashtags.go | 140 + vendor/gopkg.in/ahmdrz/goinsta.v2/inbox.go | 350 + vendor/gopkg.in/ahmdrz/goinsta.v2/media.go | 927 ++ vendor/gopkg.in/ahmdrz/goinsta.v2/profiles.go | 69 + vendor/gopkg.in/ahmdrz/goinsta.v2/request.go | 184 + vendor/gopkg.in/ahmdrz/goinsta.v2/search.go | 206 + vendor/gopkg.in/ahmdrz/goinsta.v2/timeline.go | 43 + vendor/gopkg.in/ahmdrz/goinsta.v2/types.go | 344 + vendor/gopkg.in/ahmdrz/goinsta.v2/users.go | 502 + vendor/gopkg.in/ahmdrz/goinsta.v2/utils.go | 81 + 276 files changed, 70067 insertions(+) create mode 100644 .env.example create mode 100644 .gitignore create mode 100644 Gopkg.lock create mode 100644 Gopkg.toml create mode 100644 README.md create mode 100644 cmd/export.go create mode 100644 cmd/import.go create mode 100644 datastore/datastore.go create mode 100644 datastore/redis.go create mode 100644 docker-compose.yml create mode 100644 export-insta.go create mode 100644 export/exporter.go create mode 100644 export/sheet.go create mode 100644 ig-cookie create mode 100644 main.go create mode 100644 models/data-set.go create mode 100644 models/flags.go create mode 100644 models/user.go create mode 100644 sheet/client.go create mode 100644 source/facebook.go create mode 100644 source/instagram.go create mode 100644 source/linkedin.go create mode 100644 source/mock.go create mode 100644 source/source.go create 
mode 100644 source/twitter.go create mode 100644 vendor/cloud.google.com/go/AUTHORS create mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS create mode 100644 vendor/cloud.google.com/go/LICENSE create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/.appveyor.yml create mode 100644 vendor/github.com/ChimeraCoder/anaconda/.gitignore create mode 100644 vendor/github.com/ChimeraCoder/anaconda/.travis.yml create mode 120000 vendor/github.com/ChimeraCoder/anaconda/COPYING create mode 100644 vendor/github.com/ChimeraCoder/anaconda/Gopkg.lock create mode 100644 vendor/github.com/ChimeraCoder/anaconda/Gopkg.toml create mode 100644 vendor/github.com/ChimeraCoder/anaconda/LICENSE create mode 100644 vendor/github.com/ChimeraCoder/anaconda/README create mode 120000 vendor/github.com/ChimeraCoder/anaconda/README.md create mode 100644 vendor/github.com/ChimeraCoder/anaconda/account.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/backoff.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/blocks.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/configuration.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/directmessage.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/directmessages.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/errors.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/favorites.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/friends_followers.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/geosearch.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/list.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/lists.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/log.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/media.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/mutes.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/oembed.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/place.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/rate_limit_status.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/relationship.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/search.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/streaming.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/timeline.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/trends.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/tweet.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/tweets.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/twitter.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/twitter_entities.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/twitter_user.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/users.go create mode 100644 vendor/github.com/ChimeraCoder/anaconda/webhook.go create mode 100644 vendor/github.com/ChimeraCoder/tokenbucket/.gitignore create mode 100644 vendor/github.com/ChimeraCoder/tokenbucket/COPYING create mode 120000 
vendor/github.com/ChimeraCoder/tokenbucket/LICENSE create mode 100644 vendor/github.com/ChimeraCoder/tokenbucket/README create mode 120000 vendor/github.com/ChimeraCoder/tokenbucket/README.md create mode 100644 vendor/github.com/ChimeraCoder/tokenbucket/tokenbucket.go create mode 100644 vendor/github.com/azr/backoff/.gitignore create mode 100644 vendor/github.com/azr/backoff/.travis.yml create mode 100644 vendor/github.com/azr/backoff/LICENSE create mode 100644 vendor/github.com/azr/backoff/README.md create mode 100644 vendor/github.com/azr/backoff/backoff.go create mode 100644 vendor/github.com/azr/backoff/exponential.go create mode 100644 vendor/github.com/azr/backoff/linear.go create mode 100644 vendor/github.com/dustin/go-jsonpointer/.gitignore create mode 100644 vendor/github.com/dustin/go-jsonpointer/LICENSE create mode 100644 vendor/github.com/dustin/go-jsonpointer/README.markdown create mode 100644 vendor/github.com/dustin/go-jsonpointer/bytes.go create mode 100644 vendor/github.com/dustin/go-jsonpointer/doc.go create mode 100644 vendor/github.com/dustin/go-jsonpointer/map.go create mode 100644 vendor/github.com/dustin/go-jsonpointer/reflect.go create mode 100644 vendor/github.com/dustin/gojson/.gitignore create mode 100644 vendor/github.com/dustin/gojson/LICENSE create mode 100644 vendor/github.com/dustin/gojson/decode.go create mode 100644 vendor/github.com/dustin/gojson/encode.go create mode 100644 vendor/github.com/dustin/gojson/fold.go create mode 100644 vendor/github.com/dustin/gojson/indent.go create mode 100644 vendor/github.com/dustin/gojson/scanner.go create mode 100644 vendor/github.com/dustin/gojson/stream.go create mode 100644 vendor/github.com/dustin/gojson/tags.go create mode 100644 vendor/github.com/garyburd/go-oauth/oauth/oauth.go create mode 100644 vendor/github.com/garyburd/go-oauth/oauth/oauth16.go create mode 100644 vendor/github.com/garyburd/go-oauth/oauth/oauth17.go create mode 100644 vendor/github.com/garyburd/redigo/LICENSE create mode 100644 vendor/github.com/garyburd/redigo/internal/commandinfo.go create mode 100644 vendor/github.com/garyburd/redigo/redis/conn.go create mode 100644 vendor/github.com/garyburd/redigo/redis/doc.go create mode 100644 vendor/github.com/garyburd/redigo/redis/go16.go create mode 100644 vendor/github.com/garyburd/redigo/redis/go17.go create mode 100644 vendor/github.com/garyburd/redigo/redis/go18.go create mode 100644 vendor/github.com/garyburd/redigo/redis/log.go create mode 100644 vendor/github.com/garyburd/redigo/redis/pool.go create mode 100644 vendor/github.com/garyburd/redigo/redis/pool17.go create mode 100644 vendor/github.com/garyburd/redigo/redis/pubsub.go create mode 100644 vendor/github.com/garyburd/redigo/redis/redis.go create mode 100644 vendor/github.com/garyburd/redigo/redis/reply.go create mode 100644 vendor/github.com/garyburd/redigo/redis/scan.go create mode 100644 vendor/github.com/garyburd/redigo/redis/script.go create mode 100644 vendor/github.com/golang/protobuf/AUTHORS create mode 100644 vendor/github.com/golang/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/golang/protobuf/LICENSE create mode 100644 
vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/huandu/facebook/CHANGELOG.md create mode 100644 vendor/github.com/huandu/facebook/CONTRIBUTING.md create mode 100644 vendor/github.com/huandu/facebook/LICENSE create mode 100644 vendor/github.com/huandu/facebook/README.md create mode 100644 vendor/github.com/huandu/facebook/api.go create mode 100644 vendor/github.com/huandu/facebook/app.go create mode 100644 vendor/github.com/huandu/facebook/batch_result.go create mode 100644 vendor/github.com/huandu/facebook/conversion.go create mode 100644 vendor/github.com/huandu/facebook/error.go create mode 100644 vendor/github.com/huandu/facebook/filedata.go create mode 100644 vendor/github.com/huandu/facebook/go.mod create mode 100644 vendor/github.com/huandu/facebook/paging_result.go create mode 100644 vendor/github.com/huandu/facebook/params.go create mode 100644 vendor/github.com/huandu/facebook/result.go create mode 100644 vendor/github.com/huandu/facebook/session.go create mode 100644 vendor/github.com/joho/godotenv/.gitignore create mode 100644 vendor/github.com/joho/godotenv/.travis.yml create mode 100644 vendor/github.com/joho/godotenv/LICENCE create mode 100644 vendor/github.com/joho/godotenv/README.md create mode 100644 vendor/github.com/joho/godotenv/godotenv.go create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/LICENSE create mode 100644 vendor/golang.org/x/net/PATENTS create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 
vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen1.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/doc.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/google.golang.org/api/AUTHORS create mode 100644 vendor/google.golang.org/api/CONTRIBUTORS create mode 100644 vendor/google.golang.org/api/LICENSE create mode 100644 vendor/google.golang.org/api/gensupport/backoff.go create mode 100644 vendor/google.golang.org/api/gensupport/buffer.go create mode 100644 vendor/google.golang.org/api/gensupport/doc.go create mode 100644 vendor/google.golang.org/api/gensupport/header.go create mode 100644 vendor/google.golang.org/api/gensupport/json.go create mode 100644 vendor/google.golang.org/api/gensupport/jsonfloat.go create mode 100644 vendor/google.golang.org/api/gensupport/media.go create mode 100644 vendor/google.golang.org/api/gensupport/params.go create mode 100644 vendor/google.golang.org/api/gensupport/resumable.go create mode 100644 vendor/google.golang.org/api/gensupport/retry.go create mode 100644 vendor/google.golang.org/api/gensupport/send.go create mode 100644 vendor/google.golang.org/api/googleapi/googleapi.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go create mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go create mode 100644 vendor/google.golang.org/api/googleapi/types.go create mode 100644 vendor/google.golang.org/api/sheets/v4/sheets-api.json create mode 100644 vendor/google.golang.org/api/sheets/v4/sheets-gen.go create mode 100644 vendor/google.golang.org/appengine/.travis.yml create mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/appengine/LICENSE create mode 100644 vendor/google.golang.org/appengine/README.md create mode 100644 vendor/google.golang.org/appengine/appengine.go create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 vendor/google.golang.org/appengine/errors.go create mode 100644 vendor/google.golang.org/appengine/go.mod create mode 100644 vendor/google.golang.org/appengine/go.sum create mode 100644 vendor/google.golang.org/appengine/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/api.go create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 
vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100755 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto create mode 100644 vendor/google.golang.org/appengine/internal/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/internal.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/main.go create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/net.go create mode 100755 vendor/google.golang.org/appengine/internal/regen.sh create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto create mode 100644 vendor/google.golang.org/appengine/namespace.go create mode 100644 vendor/google.golang.org/appengine/timeout.go create mode 100755 vendor/google.golang.org/appengine/travis_install.sh create mode 100755 vendor/google.golang.org/appengine/travis_test.sh create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/.gitignore create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/LICENSE create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/README.md create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/account.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/activity.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/comments.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/const.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/doc.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/generator.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/go.mod create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/goinsta.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/goinsta/LICENSE create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/hashtags.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/inbox.go 
create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/media.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/profiles.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/request.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/search.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/timeline.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/types.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/users.go create mode 100644 vendor/gopkg.in/ahmdrz/goinsta.v2/utils.go diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..9947a9e --- /dev/null +++ b/.env.example @@ -0,0 +1,13 @@ +TWITTER_CONSUMER_KEY= +TWITTER_CONSUMER_SECRET= +TWITTER_ACCESS_TOKEN= +TWITTER_TOKEN_SECRET= +TWITTER_USERNAME= +IG_USERNAME= +IG_PASSWORD= +IG_COOKIE_PATH= +REDIS_ADDRESS= +FACEBOOK_APP_ID= +FACEBOOK_APP_SECRET= +FACEBOOK_PAGE_NAME= +SHORT_LIVED_FACEBOOK_TOKEN= \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0f248e5 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +token.json +credentials.json +.env +igcookie diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000..77792bf --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,133 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "cloud.google.com/go" + packages = ["compute/metadata"] + revision = "74b12019e2aa53ec27882158f59192d7cd6d1998" + version = "v0.33.1" + +[[projects]] + name = "github.com/ChimeraCoder/anaconda" + packages = ["."] + revision = "9c68684170b980d5b4c2ed08fae9b530e659904d" + version = "v2.0.0" + +[[projects]] + branch = "master" + name = "github.com/ChimeraCoder/tokenbucket" + packages = ["."] + revision = "c5a927568de7aad8a58127d80bcd36ca4e71e454" + +[[projects]] + branch = "master" + name = "github.com/azr/backoff" + packages = ["."] + revision = "53511d3c733003985b0b76f733df1f4d0095ee6a" + +[[projects]] + branch = "master" + name = "github.com/dustin/go-jsonpointer" + packages = ["."] + revision = "ba0abeacc3dcca5b9b20f31509c46794edbc9965" + +[[projects]] + branch = "master" + name = "github.com/dustin/gojson" + packages = ["."] + revision = "2e71ec9dd5adce3b168cd0dbde03b5cc04951c30" + +[[projects]] + branch = "master" + name = "github.com/garyburd/go-oauth" + packages = ["oauth"] + revision = "bca2e7f09a178fd36b034107a00e2323bca6a82e" + +[[projects]] + name = "github.com/garyburd/redigo" + packages = [ + "internal", + "redis" + ] + revision = "a69d19351219b6dd56f274f96d85a7014a2ec34e" + version = "v1.6.0" + +[[projects]] + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + name = "github.com/huandu/facebook" + packages = ["."] + revision = "ad9e534b2444ed69d73a92315c69e25d76abc430" + version = "v2.3.2" + +[[projects]] + name = "github.com/joho/godotenv" + packages = ["."] + revision = "23d116af351c84513e1946b527c88823e476be13" + version = "v1.3.0" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp" + ] + revision = "adae6a3d119ae4890b46832a2e88a95adc62b8e7" + +[[projects]] + branch = "master" + name = "golang.org/x/oauth2" + packages = [ + ".", + "google", + "internal", + "jws", + "jwt" + ] + revision = "8f65e3013ebad444f13bc19536f7865efc793816" + +[[projects]] + branch = "master" + name = "google.golang.org/api" + 
packages = [ + "gensupport", + "googleapi", + "googleapi/internal/uritemplates", + "sheets/v4" + ] + revision = "faade3cbb06a30202f2da53a8a5e3c4afe60b0c2" + +[[projects]] + name = "google.golang.org/appengine" + packages = [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch" + ] + revision = "4a4468ece617fc8205e99368fa2200e9d1fad421" + version = "v1.3.0" + +[[projects]] + name = "gopkg.in/ahmdrz/goinsta.v2" + packages = ["."] + revision = "8a4b1078ad1b865b881561251c8d5b07e0765994" + version = "v2.3" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "f1ffec4525ce4140c5eca1b7dcf1c5c63e3c419297f2c41a57f83b23e48910f4" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..8a9fc18 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,58 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + name = "github.com/ChimeraCoder/anaconda" + version = "2.0.0" + +[[constraint]] + name = "github.com/garyburd/redigo" + version = "1.6.0" + +[[constraint]] + name = "github.com/huandu/facebook" + version = "2.3.2" + +[[constraint]] + name = "github.com/joho/godotenv" + version = "1.3.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/oauth2" + +[[constraint]] + branch = "master" + name = "google.golang.org/api" + +[[constraint]] + name = "gopkg.in/ahmdrz/goinsta.v2" + version = "2.3.0" + +[prune] + go-tests = true + unused-packages = true diff --git a/README.md b/README.md new file mode 100644 index 0000000..0169f32 --- /dev/null +++ b/README.md @@ -0,0 +1,69 @@ +# Disclaimer + +This is a quick script that served some internal purpose. +Nothing fancy to see! + +Not much error checking/logging, so don't panic if you see random panics. (Pun intended!) + +# Setup + +Run `docker-compose up` if you need a local Redis instance. +At the moment the only datastore is Redis, to keep things simple. + +Copy `.env.example` to `.env` and fill in whatever you need. + +Later we should be able to split the package to offer: + +- a command-line tool to import data +- a REST API + a web UI to visualise/export the data + +Examples: + +`go run main.go --mode=import --source=instagram --source=twitter` + +This imports the list of users that follow you or that you follow on IG/Twitter, +with whatever details we can get. + +`go run main.go --mode=export --source=instagram --source=twitter` + +This creates a Google Sheet with the exported details. +A filter view is included that allows you to sort the data ASC/DESC. + +Dependencies are managed with `dep`. + +# Sources + +We can import followers from the sources listed below: + +## INSTAGRAM + +If you want to use IG, run `go run export-insta.go` first to create a cookie.
+The cookie will be reused so you don't have to log in and out all the time. + +The rate limit is unknown. + +## TWITTER + +A Twitter developer account is required. + +## FB + +It turns out you can't really get anything interesting from this. +No more friends, no more subscribers/fans of a page... + +Get a page or page app token. + +via `https://developers.facebook.com/tools/explorer/` + +## LINKEDIN + +TODO + +# Export + +We can export to: + +## GOOGLE SHEET + +Export takes 2 params: depth (how many levels to go through; going deeper may be expensive in API calls) +and baseScreenName, the screen name used to query the first level. diff --git a/cmd/export.go b/cmd/export.go new file mode 100644 index 0000000..47fb32d --- /dev/null +++ b/cmd/export.go @@ -0,0 +1,32 @@ +package cmd + +import ( + "fmt" + + "github.com/athletifit/social-network-insights/datastore" + "github.com/athletifit/social-network-insights/export" + "github.com/athletifit/social-network-insights/models" +) + +func ExportData(sourcesPtr models.ArrayFlags) { + store := datastore.NewRedisDataStore() + + // fixme change to users + sets := make([]models.DataSet, 0, 1) + + for _, s := range sourcesPtr { + users, err := store.LoadUsers(s) + if err != nil { + fmt.Printf("%+v", err) + continue + } + set := models.NewDataSet(s, *users) + sets = append(sets, set) + } + + // later, what type of exporter? + ex := export.NewSheetExporter() + + document := export.NewDocument("Influencers", sets) + ex.Export(document) +} diff --git a/cmd/import.go b/cmd/import.go new file mode 100644 index 0000000..d6f35dd --- /dev/null +++ b/cmd/import.go @@ -0,0 +1,55 @@ +package cmd + +import ( + "fmt" + "sync" + + "github.com/athletifit/social-network-insights/datastore" + "github.com/athletifit/social-network-insights/models" + "github.com/athletifit/social-network-insights/source" + "github.com/joho/godotenv" +) + +// ImportData imports the sources passed in via command-line flags.
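+// Each selected source fetches its users in its own goroutine and sends a *models.UserSet on ch;
+// a helper goroutine closes ch once the WaitGroup drains, and the receive loop below persists
+// each set into the datastore as it arrives.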
+func ImportData(sourcesPtr models.ArrayFlags) error { + ch := make(models.UserSetChan) + wg := &sync.WaitGroup{} + + var env map[string]string + env, err := godotenv.Read() + if err != nil { + return err + } + + for _, s := range sourcesPtr { + src, err := source.NewSource(s, env) + if err != nil { + fmt.Printf("%+v", err) + continue + } + + wg.Add(1) + go src.GetUsers(ch, wg) + fmt.Printf("=== %s started === \n", s) + } + + // wait that all goroutines have sent the data to close the channel + go func() { + wg.Wait() + close(ch) + }() + + dataStore := datastore.NewRedisDataStore() + + for userSet := range ch { + + fmt.Printf("=== Done with %s ===\n", userSet.Title) + err := dataStore.SaveUsers(userSet) + if err != nil { + fmt.Printf("%+v", err) + } + } + + fmt.Println("=== Import finished ===") + return nil +} diff --git a/datastore/datastore.go b/datastore/datastore.go new file mode 100644 index 0000000..b33b80b --- /dev/null +++ b/datastore/datastore.go @@ -0,0 +1,18 @@ +package datastore + +import "github.com/athletifit/social-network-insights/models" + +type DataStore interface { + LoadUsers(source string) (*models.UserMap, error) + SaveLastTwitterCursor(cursor int64) + SaveUsers(us *models.UserSet) error +} + +func NewDataStore(store string) DataStore { + switch store { + case "redis": + return NewRedisDataStore() + } + + return nil +} diff --git a/datastore/redis.go b/datastore/redis.go new file mode 100644 index 0000000..2435d9d --- /dev/null +++ b/datastore/redis.go @@ -0,0 +1,132 @@ +package datastore + +import ( + "encoding/json" + "fmt" + "strconv" + "sync" + "time" + + "github.com/athletifit/social-network-insights/models" + "github.com/garyburd/redigo/redis" +) + +// TODO: look into TLS using stunnel. +// Spiped + +const ( + // redisSourcesIndex the index of the sources db. + redisSourcesIndex = 0 + + // RedisSourcesPoolName is the name of the sources pool. + RedisSourcesPoolName = "sources" +) + +var pools map[string]*redis.Pool +var once = sync.Once{} + +// redisDBs our maps with the mapping redis db <-> index. +var redisDBs = map[string]int{ + RedisSourcesPoolName: redisSourcesIndex, +} + +// Pool wraps a new Pool function in a once.Do to only do it once... +func Pool() map[string]*redis.Pool { + once.Do(newPools) + return pools +} + +// Init our redis pools with the different dbs. +func newPools() { + addr := "localhost:6379" // fix later use env. + pools = make(map[string]*redis.Pool, len(redisDBs)) + + for k, v := range redisDBs { + pools[k] = &redis.Pool{ + MaxIdle: 3, + IdleTimeout: 240 * time.Second, + Dial: (func(index int) func() (redis.Conn, error) { + connDialer := func() (redis.Conn, error) { + // pwd := redis.DialPassword(redisPwd) + c, err := redis.Dial("tcp", addr) + if err != nil { + return nil, err + } + + if _, err := c.Do("SELECT", strconv.Itoa(index)); err != nil { + c.Close() + return nil, err + } + + return c, nil + } + return connDialer + })(v), + } + } +} + +type RedisDataStore struct { + pools map[string]*redis.Pool +} + +// NewRedisDataStore returns a new redis data store. +func NewRedisDataStore() *RedisDataStore { + pools := Pool() + return &RedisDataStore{ + pools: pools, + } +} + +// SaveLastTwitterCursor todo when we do deeper searches on twitter. +func (rds *RedisDataStore) SaveLastTwitterCursor(cursor int64) { + fmt.Printf("Save cursor: %d", cursor) +} + +// LoadUsers loads user from a redis hash into a user map. 
+func (rds *RedisDataStore) LoadUsers(source string) (*models.UserMap, error) { + conn := rds.pools[RedisSourcesPoolName].Get() + defer conn.Close() + + values, err := redis.StringMap(conn.Do("HGETALL", source)) + if err != nil { + return nil, err + } + if err != nil && err != redis.ErrNil { + return nil, err + } + + users := make(models.UserMap, 0) + for k, v := range values { + var u models.User + json.Unmarshal([]byte(v), &u) + users[k] = u + } + + return &users, nil +} + +// SaveUsers persits the users of a set in a redis hash. +func (rds *RedisDataStore) SaveUsers(userSet *models.UserSet) error { + conn := rds.pools[RedisSourcesPoolName].Get() + defer conn.Close() + + // convert to something good for redis... + usersMap := make(map[string][]byte, 0) + for _, u := range userSet.Users { + if _, ok := usersMap[u.ScreenName]; !ok { + b, _ := json.Marshal(&u) + usersMap[u.ScreenName] = b + } + } + + // only if we have users to save. + if len(usersMap) > 0 { + _, err := conn.Do("HMSET", redis.Args{userSet.Title}.AddFlat(usersMap)...) + if err != nil { + return err + } + } + + return nil +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..cf14dbf --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,9 @@ +version: '3' +services: + redis: + image: redis:4.0.1-alpine + expose: + - 6379 + ports: + - '6379:6379' + # TODO: volume for this. diff --git a/export-insta.go b/export-insta.go new file mode 100644 index 0000000..d3ed935 --- /dev/null +++ b/export-insta.go @@ -0,0 +1,32 @@ +// +build ignore + +package main + +import ( + "fmt" + + "github.com/joho/godotenv" + "gopkg.in/ahmdrz/goinsta.v2" +) + +func main() { + var env map[string]string + env, err := godotenv.Read() + if err != nil { + fmt.Printf("%+v", err) + return + } + + inst := goinsta.New(env["IG_USERNAME"], env["IG_PASSWORD"]) + err = inst.Login() + if err != nil { + fmt.Printf("%+v", err) + return + } + + err = inst.Export(env["IG_COOKIE_PATH"]) + if err != nil { + fmt.Printf("%+v", err) + return + } +} diff --git a/export/exporter.go b/export/exporter.go new file mode 100644 index 0000000..6d540ab --- /dev/null +++ b/export/exporter.go @@ -0,0 +1,25 @@ +package export + +import ( + "github.com/athletifit/social-network-insights/models" +) + +// Exporter is our main interface. +// Takes a document and is able to export it. +type Exporter interface { + Export(document Document) +} + +// Document represent the physical document we export. +type Document struct { + name string + dataSets []models.DataSet +} + +// NewDocument creates a new document. +func NewDocument(name string, dataSets []models.DataSet) Document { + return Document{ + name: name, + dataSets: dataSets, + } +} diff --git a/export/sheet.go b/export/sheet.go new file mode 100644 index 0000000..a71a954 --- /dev/null +++ b/export/sheet.go @@ -0,0 +1,242 @@ +package export + +// code from google + +import ( + "fmt" + "log" + "net/http" + + "github.com/athletifit/social-network-insights/models" + "github.com/athletifit/social-network-insights/sheet" + sheets "google.golang.org/api/sheets/v4" +) + +// SheetExporter represents a google sheet exporter. +type SheetExporter struct { + SheetClient *http.Client +} + +// NewSheetExporter returns a google sheet exporter. +func NewSheetExporter() Exporter { + sc := sheet.GetSheetClient() + return SheetExporter{ + SheetClient: sc, + } +} + +// Export is our main method. Writes the data to export. 
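+// It builds one sheet per data set via createSheet (which also attaches a sorted filter view
+// to each sheet through a BatchUpdate request), then prints the URL of the new spreadsheet.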
+func (se SheetExporter) Export(document Document) { + srv, err := sheets.New(se.SheetClient) + if err != nil { + log.Fatalf("Unable to retrieve Sheets client: %v", err) + return + } + + var s *sheets.Spreadsheet + create := true + if create { + s, err = se.createSheet(srv, document) + if err != nil { + log.Fatalf("Unable to write Sheet: %v", err) + return + } + } + + // Prints the url of the exported doc. + fmt.Println("Available at: " + s.SpreadsheetUrl) + +} + +func (se SheetExporter) createSheet(srv *sheets.Service, document Document) (*sheets.Spreadsheet, error) { + sheetsToWrite := se.getSheets(document.dataSets) + + rb := &sheets.Spreadsheet{ + Sheets: sheetsToWrite, + Properties: &sheets.SpreadsheetProperties{ + Title: document.name, + }, + } + + s, err := srv.Spreadsheets.Create(rb).Do() + if err != nil { + fmt.Printf("Err creating spreadsheet: %+v ", err) + return nil, err + } + + requests := make([]*sheets.Request, 0, 1) + for i, ds := range document.dataSets { + r := se.getFitlerView(s.Sheets[i].Properties.SheetId, len(ds.Data)) + req := &sheets.Request{AddFilterView: r} + requests = append(requests, req) + } + + bu := &sheets.BatchUpdateSpreadsheetRequest{ + Requests: requests, + } + + _, err = srv.Spreadsheets.BatchUpdate(s.SpreadsheetId, bu).Do() + if err != nil { + fmt.Printf("Err sorting update spreadsheet: %+v ", err) + return nil, err + } + + return s, nil +} + +func (se SheetExporter) getSheets(dataSets []models.DataSet) []*sheets.Sheet { + sheetsToWrite := make([]*sheets.Sheet, 0, 1) + + for _, d := range dataSets { + s := se.getSheet(d) + sheetsToWrite = append(sheetsToWrite, s) + } + + return sheetsToWrite +} + +func (se SheetExporter) getSheet(dataSet models.DataSet) *sheets.Sheet { + rows := make([]*sheets.RowData, 0, 1) + rows = append(rows, se.getHeaderRow()) + + for _, d := range dataSet.Data { + r := se.getUserRow(d) + rows = append(rows, r) + } + + gridData := []*sheets.GridData{&sheets.GridData{ + RowData: rows, + }} + + return &sheets.Sheet{ + Properties: &sheets.SheetProperties{ + Title: dataSet.Title, + }, + Data: gridData, + } +} + +func (se SheetExporter) getHeaderRow() *sheets.RowData { + cells := make([]*sheets.CellData, 0, 2) + + handleCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: "Screen Name", + }, + } + cells = append(cells, handleCell) + + countCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: "Followers Count", + }, + } + cells = append(cells, countCell) + + emailCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: "Email", + }, + } + cells = append(cells, emailCell) + + nameCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: "Name", + }, + } + cells = append(cells, nameCell) + + urlCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: "URL", + }, + } + cells = append(cells, urlCell) + + linkCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: "Link", + }, + } + cells = append(cells, linkCell) + + return &sheets.RowData{ + Values: cells, + } +} + +// getTwitterRow creates a sheet row with twitter data. +// may not belong here..until we use reflect to create a row out of any struct..? 
+func (se SheetExporter) getUserRow(u models.User) *sheets.RowData { + + cells := make([]*sheets.CellData, 0, 2) + handleCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: u.ScreenName, + }, + } + cells = append(cells, handleCell) + + countCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + NumberValue: float64(u.FollowersCount), + }, + } + cells = append(cells, countCell) + + emailCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: u.Email, + }, + } + cells = append(cells, emailCell) + + nameCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: u.Name, + }, + } + cells = append(cells, nameCell) + + urlCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: u.URL, + }, + } + cells = append(cells, urlCell) + + linkCell := &sheets.CellData{ + UserEnteredValue: &sheets.ExtendedValue{ + StringValue: "https://twitter.com/" + u.ScreenName, + }, + } + cells = append(cells, linkCell) + + return &sheets.RowData{ + Values: cells, + } +} + +func (se SheetExporter) getFitlerView(sheetID int64, maxRow int) *sheets.AddFilterViewRequest { + return &sheets.AddFilterViewRequest{ + Filter: &sheets.FilterView{ + Title: "Sorted Desc", + Range: &sheets.GridRange{ + EndColumnIndex: 6, + StartColumnIndex: 0, + StartRowIndex: 0, + EndRowIndex: int64(maxRow) + 1, + SheetId: sheetID, + }, + SortSpecs: []*sheets.SortSpec{ + &sheets.SortSpec{ + SortOrder: "DESCENDING", + }, + &sheets.SortSpec{ + SortOrder: "DESCENDING", + }, + }, + }, + } +} diff --git a/ig-cookie b/ig-cookie new file mode 100644 index 0000000..eecc980 --- /dev/null +++ b/ig-cookie @@ -0,0 +1 @@ +{"id":9196830353,"username":"amoaffiliates","device_id":"android-2f813ee100584ecb","uuid":"3c9c0391-20af-4bd1-a201-4b0782df9858","rank_token":"9196830353_3c9c0391-20af-4bd1-a201-4b0782df9858","token":"PYSk4N2zewjc9Qv8MvatZlRgTj9LsCQ2","phone_id":"cdcdd599-761d-44f6-8abe-b26e017415f1","cookies":[{"Name":"rur","Value":"FRC","Path":"","Domain":"","Expires":"0001-01-01T00:00:00Z","RawExpires":"","MaxAge":0,"Secure":false,"HttpOnly":false,"Raw":"","Unparsed":null},{"Name":"mid","Value":"W_qQxQABAAErB8LzprXEy-9boeLp","Path":"","Domain":"","Expires":"0001-01-01T00:00:00Z","RawExpires":"","MaxAge":0,"Secure":false,"HttpOnly":false,"Raw":"","Unparsed":null},{"Name":"mcd","Value":"3","Path":"","Domain":"","Expires":"0001-01-01T00:00:00Z","RawExpires":"","MaxAge":0,"Secure":false,"HttpOnly":false,"Raw":"","Unparsed":null},{"Name":"csrftoken","Value":"PYSk4N2zewjc9Qv8MvatZlRgTj9LsCQ2","Path":"","Domain":"","Expires":"0001-01-01T00:00:00Z","RawExpires":"","MaxAge":0,"Secure":false,"HttpOnly":false,"Raw":"","Unparsed":null},{"Name":"ds_user","Value":"amoaffiliates","Path":"","Domain":"","Expires":"0001-01-01T00:00:00Z","RawExpires":"","MaxAge":0,"Secure":false,"HttpOnly":false,"Raw":"","Unparsed":null},{"Name":"ds_user_id","Value":"9196830353","Path":"","Domain":"","Expires":"0001-01-01T00:00:00Z","RawExpires":"","MaxAge":0,"Secure":false,"HttpOnly":false,"Raw":"","Unparsed":null},{"Name":"sessionid","Value":"IGSC3ca6dd31ae0226349867a11a0c1f22b22e4c2600a75f1fb9b8d94a525b88c3f6%3A1NTQrxNsDaklEF51z2ZOlTKee3MR864Q%3A%7B%22_auth_user_id%22%3A9196830353%2C%22_auth_user_backend%22%3A%22accounts.backends.CaseInsensitiveModelBackend%22%2C%22_auth_user_hash%22%3A%22%22%2C%22_platform%22%3A1%2C%22_token_ver%22%3A2%2C%22_token%22%3A%229196830353%3AXL6fma5bZhsRzbxt7v2QVnRjRAXeL8YQ%3Acf6a5fd6949c55acef3af0de45f6c4879ebb0f8d157cbf67c4bac791407c830
7%22%2C%22last_refreshed%22%3A1543147720.1695582867%7D","Path":"","Domain":"","Expires":"0001-01-01T00:00:00Z","RawExpires":"","MaxAge":0,"Secure":false,"HttpOnly":false,"Raw":"","Unparsed":null}]} \ No newline at end of file diff --git a/main.go b/main.go new file mode 100644 index 0000000..eadbdb6 --- /dev/null +++ b/main.go @@ -0,0 +1,26 @@ +package main + +import ( + "flag" + "fmt" + + "github.com/athletifit/social-network-insights/cmd" + "github.com/athletifit/social-network-insights/models" +) + +func main() { + modePtr := flag.String("mode", "import", "Starts an import of your social data") + var sourcesPtr models.ArrayFlags + flag.Var(&sourcesPtr, "source", "Social Sources to use") + flag.Parse() + + if *modePtr == "export" { + cmd.ExportData(sourcesPtr) + return + } + + err := cmd.ImportData(sourcesPtr) + if err != nil { + fmt.Printf("%+v", err) + } +} diff --git a/models/data-set.go b/models/data-set.go new file mode 100644 index 0000000..a4ac0bb --- /dev/null +++ b/models/data-set.go @@ -0,0 +1,29 @@ +package models + +// DataSet gives a title to the data we want to export. +type DataSet struct { + Title string + Data UserMap +} + +// NewDataSet returns a new DataSet. +func NewDataSet(title string, data UserMap) DataSet { + return DataSet{ + Title: title, + Data: data, + } +} + +type UserSet struct { + Title string + Users []User +} + +func NewUserSet(title string, users []User) *UserSet { + return &UserSet{ + Title: title, + Users: users, + } +} + +type UserSetChan chan *UserSet diff --git a/models/flags.go b/models/flags.go new file mode 100644 index 0000000..dbc239b --- /dev/null +++ b/models/flags.go @@ -0,0 +1,12 @@ +package models + +type ArrayFlags []string + +func (i *ArrayFlags) String() string { + return "flag" +} + +func (i *ArrayFlags) Set(value string) error { + *i = append(*i, value) + return nil +} diff --git a/models/user.go b/models/user.go new file mode 100644 index 0000000..ff00baf --- /dev/null +++ b/models/user.go @@ -0,0 +1,12 @@ +package models + +// User is a struct that holds a user's details. +type User struct { + Email string `redis:"email"` + Name string `redis:"name"` + ScreenName string `redis:"screenName"` + FollowersCount int64 `redis:"followersCount"` + URL string `redis:"url"` +} + +type UserMap map[string]User diff --git a/sheet/client.go b/sheet/client.go new file mode 100644 index 0000000..97b6531 --- /dev/null +++ b/sheet/client.go @@ -0,0 +1,85 @@ +package sheet + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +func GetSheetClient() *http.Client { + b, err := ioutil.ReadFile("credentials.json") + if err != nil { + log.Fatalf("Unable to read client secret file: %v", err) + } + + // If modifying these scopes, delete your previously saved token.json. + config, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/spreadsheets") + if err != nil { + log.Fatalf("Unable to parse client secret file to config: %v", err) + } + client := getClient(config) + + return client +} + +// Retrieve a token, saves the token, then returns the generated client. +func getClient(config *oauth2.Config) *http.Client { + // The file token.json stores the user's access and refresh tokens, and is + // created automatically when the authorization flow completes for the first + // time. 
+	tokFile := "token.json"
+	tok, err := tokenFromFile(tokFile)
+	if err != nil {
+		tok = getTokenFromWeb(config)
+		saveToken(tokFile, tok)
+	}
+	return config.Client(context.Background(), tok)
+}
+
+// Request a token from the web, then returns the retrieved token.
+func getTokenFromWeb(config *oauth2.Config) *oauth2.Token {
+	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
+	fmt.Printf("Go to the following link in your browser then type the "+
+		"authorization code: \n%v\n", authURL)
+
+	var authCode string
+	if _, err := fmt.Scan(&authCode); err != nil {
+		log.Fatalf("Unable to read authorization code: %v", err)
+	}
+
+	tok, err := config.Exchange(context.TODO(), authCode)
+	if err != nil {
+		log.Fatalf("Unable to retrieve token from web: %v", err)
+	}
+	return tok
+}
+
+// Retrieves a token from a local file.
+func tokenFromFile(file string) (*oauth2.Token, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	tok := &oauth2.Token{}
+	err = json.NewDecoder(f).Decode(tok)
+	return tok, err
+}
+
+// Saves a token to a file path.
+func saveToken(path string, token *oauth2.Token) {
+	fmt.Printf("Saving credential file to: %s\n", path)
+	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		log.Fatalf("Unable to cache oauth token: %v", err)
+	}
+	defer f.Close()
+	json.NewEncoder(f).Encode(token)
+}
diff --git a/source/facebook.go b/source/facebook.go
new file mode 100644
index 0000000..af84628
--- /dev/null
+++ b/source/facebook.go
@@ -0,0 +1,74 @@
+package source
+
+import (
+	"sync"
+
+	"github.com/athletifit/social-network-insights/models"
+	fb "github.com/huandu/facebook"
+)
+
+// Facebook what we need to sort our followers etc.
+type Facebook struct {
+	API  *fb.Session
+	Conf Conf
+}
+
+// NewFacebook returns a struct with an initialised facebook api.
+func NewFacebook(env map[string]string, cnf Conf) (*Facebook, error) {
+	// Get Facebook access token.
+
+	// Create a global App var to hold app id and secret.
+	var globalApp = fb.New(env["FACEBOOK_APP_ID"], env["FACEBOOK_APP_SECRET"])
+
+	// Facebook asks for a valid redirect uri when parsing signed request.
+	// It's a new enforced policy starting as of late 2013.
+	globalApp.RedirectUri = "http://stocksinplay.com/"
+
+	token := env["SHORT_LIVED_FACEBOOK_TOKEN"]
+	// If there is another way to get decoded access token,
+	// this will return a session created directly from the token.
+	session := globalApp.Session(token)
+
+	// This validates the access token by ensuring that the current user ID is properly returned. err is nil if token is valid.
+	err := session.Validate()
+
+	if err != nil {
+		return nil, err
+	}
+
+	return &Facebook{
+		API:  session,
+		Conf: cnf,
+	}, nil
+}
+
+// GetUsers gets facebook users.
+func (fbk Facebook) GetUsers(ch models.UserSetChan, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	users := make([]models.User, 0, 1)
+	res, _ := fbk.API.Get("/me/insights", nil)
+
+	// create a paging structure.
+	paging, _ := res.Paging(fbk.API)
+	var allResults []fb.Result
+
+	// append first page of results to slice of Result
+	allResults = append(allResults, paging.Data()...)
+
+	for {
+		// get next page.
+		noMore, err := paging.Next()
+		if err != nil {
+			panic(err)
+		}
+		if noMore {
+			// No more results available
+			break
+		}
+		// append current page of results to slice of Result
+		allResults = append(allResults, paging.Data()...)
+	}
+
+	ch <- models.NewUserSet("facebook", users)
+}
diff --git a/source/instagram.go b/source/instagram.go
new file mode 100644
index 0000000..d923972
--- /dev/null
+++ b/source/instagram.go
@@ -0,0 +1,111 @@
+package source
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/athletifit/social-network-insights/models"
+	"gopkg.in/ahmdrz/goinsta.v2"
+)
+
+// Instagram what we need to sort our followers etc.
+type Instagram struct {
+	API  *goinsta.Instagram
+	Conf Conf
+}
+
+// NewInstagram returns a struct with initialised instagram api.
+func NewInstagram(env map[string]string, conf Conf) (*Instagram, error) {
+	insta, err := goinsta.Import("ig-cookie")
+	if err != nil {
+		return nil, err
+	}
+
+	return &Instagram{
+		API:  insta,
+		Conf: conf,
+	}, nil
+}
+
+// getFriends sorts instagram friends by followers count desc.
+// TODO: make recursive ?
+// Ideally we want to get friends of our friends etc (depth)
+func (ig Instagram) getFriends() []models.User {
+	user, err := ig.API.Profiles.ByName(ig.Conf.Usernames[0])
+	if err != nil {
+		fmt.Printf("%+v", err)
+		return nil
+	}
+
+	friends := user.Following()
+	users := make([]models.User, 0, 1)
+
+	for friends.Next() {
+		for _, user := range friends.Users {
+			// we get more details from a profile search
+			// will cost us more queries but it's useful.
+			friend, err := ig.API.Profiles.ByName(user.Username)
+			if err != nil {
+				fmt.Printf("%+v", err)
+				continue
+			}
+			users = append(users, models.User{
+				ScreenName:     friend.Username,
+				FollowersCount: int64(friend.FollowerCount),
+				Name:           friend.FullName,
+				Email:          friend.Email,
+				URL:            friend.ExternalURL,
+			})
+		}
+		fmt.Printf("IG")
+	}
+
+	return users
+}
+
+// getFollowers sorts ig followers by followers count desc.
+func (ig Instagram) getFollowers() []models.User {
+
+	user, err := ig.API.Profiles.ByName(ig.Conf.Usernames[0])
+	if err != nil {
+		fmt.Printf("%+v", err)
+		return nil
+	}
+
+	followers := user.Followers()
+	users := make([]models.User, 0, 1)
+
+	for followers.Next() {
+		for _, user := range followers.Users {
+			follower, err := ig.API.Profiles.ByName(user.Username)
+			if err != nil {
+				fmt.Printf("%+v", err)
+				continue
+			}
+
+			users = append(users, models.User{
+				ScreenName:     follower.Username,
+				FollowersCount: int64(follower.FollowerCount),
+				Name:           follower.FullName,
+				Email:          follower.Email,
+				URL:            follower.ExternalURL,
+			})
+		}
+		fmt.Printf("IG")
+	}
+
+	return users
+}
+
+// GetUsers returns the dataset for the source.
+func (ig Instagram) GetUsers(ch models.UserSetChan, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	tFol := ig.getFollowers()
+	tFri := ig.getFriends()
+
+	final := append(tFol, tFri...)
+
+	ch <- models.NewUserSet("instagram", final)
+
+}
diff --git a/source/linkedin.go b/source/linkedin.go
new file mode 100644
index 0000000..d150341
--- /dev/null
+++ b/source/linkedin.go
@@ -0,0 +1 @@
+package source
diff --git a/source/mock.go b/source/mock.go
new file mode 100644
index 0000000..d8b1f35
--- /dev/null
+++ b/source/mock.go
@@ -0,0 +1,47 @@
+package source
+
+import (
+	"sync"
+
+	"github.com/athletifit/social-network-insights/models"
+)
+
+// MockSource is a mock source that we use to dev.
+type MockSource struct {
+	Name string
+}
+
+// NewMockSource returns a new MockSource struct.
+func NewMockSource(name string) (*MockSource, error) {
+	return &MockSource{Name: name}, nil
+}
+
+func (m MockSource) getTestUsers() []models.User {
+	friends := []models.User{
+		models.User{
+			ScreenName:     "nonotest",
+			FollowersCount: 100,
+		}, models.User{
+			ScreenName:     "paul",
+			FollowersCount: 200,
+		}, models.User{
+			ScreenName:     "jerome",
+			FollowersCount: 50,
+		}, models.User{
+			ScreenName:     "lol",
+			FollowersCount: 1,
+		}, models.User{
+			ScreenName:     "lolllll",
+			FollowersCount: 2,
+		}}
+	return friends
+}
+
+// GetUsers returns the dataset for the source.
+func (m MockSource) GetUsers(ch models.UserSetChan, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	tFol := m.getTestUsers()
+
+	ch <- models.NewUserSet("mock", tFol)
+}
diff --git a/source/source.go b/source/source.go
new file mode 100644
index 0000000..4e56b7b
--- /dev/null
+++ b/source/source.go
@@ -0,0 +1,39 @@
+package source
+
+import (
+	"errors"
+	"sync"
+
+	"github.com/athletifit/social-network-insights/models"
+)
+
+// TODO: Remove hardcoded strings.
+
+// Source is an interface that defines various methods for our source.
+type Source interface {
+	GetUsers(ch models.UserSetChan, wg *sync.WaitGroup)
+}
+
+// NewSource returns a new source.
+func NewSource(srcName string, env map[string]string) (Source, error) {
+	switch srcName {
+	case "instagram":
+		return NewInstagram(env, Conf{[]string{"stocksinplayau"}, 1})
+	case "facebook":
+		return NewFacebook(env, Conf{[]string{env["FACEBOOK_PAGE_NAME"]}, 1})
+	case "twitter":
+		conf := Conf{[]string{env["TWITTER_USERNAME"], "GregOxfordPG"}, 1}
+		return NewTwitter(env, conf)
+	case "mock":
+		return NewMockSource("mock")
+	}
+
+	return nil, errors.New(srcName + ": source not found")
+}
+
+// Conf is a struct that we will use to pass various configuration elements
+// for a source.
+type Conf struct {
+	Usernames []string
+	Depth     int64
+}
diff --git a/source/twitter.go b/source/twitter.go
new file mode 100644
index 0000000..dddee7f
--- /dev/null
+++ b/source/twitter.go
@@ -0,0 +1,115 @@
+package source
+
+import (
+	"fmt"
+	"net/url"
+	"sync"
+
+	"github.com/ChimeraCoder/anaconda"
+	"github.com/athletifit/social-network-insights/models"
+)
+
+// Twitter what we need to sort our followers etc.
+type Twitter struct {
+	API  *anaconda.TwitterApi
+	Conf Conf
+}
+
+// NewTwitter returns a struct with initialised twitter api.
+func NewTwitter(env map[string]string, conf Conf) (*Twitter, error) {
+	consumerKey := env["TWITTER_CONSUMER_KEY"]
+	consumerSecret := env["TWITTER_CONSUMER_SECRET"]
+	accessToken := env["TWITTER_ACCESS_TOKEN"]
+	tokenSecret := env["TWITTER_TOKEN_SECRET"]
+
+	api := anaconda.NewTwitterApiWithCredentials(accessToken, tokenSecret, consumerKey, consumerSecret)
+
+	return &Twitter{
+		API:  api,
+		Conf: conf,
+	}, nil
+}
+
+// getFriends sorts twitter friends by followers count desc.
+func (ts Twitter) getFriends(v url.Values) []models.User {
+	users := make([]models.User, 0, 1)
+	results := ts.API.GetFriendsListAll(v)
+
+	// iterate through the result channel
+	for result := range results {
+		if result.Error != nil {
+			fmt.Printf("%+v", result.Error)
+			break
+		}
+
+		// iterate through each result page
+		for _, f := range result.Friends {
+			u := models.User{
+				ScreenName:     f.ScreenName,
+				FollowersCount: int64(f.FollowersCount),
+				URL:            f.URL,
+				Name:           f.Name,
+				Email:          f.Email,
+			}
+			users = append(users, u)
+		}
+		fmt.Printf("T")
+	}
+
+	return users
+}
+
+// getFollowers sorts twitter followers by count of followers desc.
+func (ts Twitter) getFollowers(v url.Values) []models.User {
+	users := make([]models.User, 0, 1)
+	results := ts.API.GetFollowersListAll(v)
+
+	// iterate through the result channel
+	for result := range results {
+		if result.Error != nil {
+			fmt.Printf("%+v", result.Error)
+			break
+		}
+
+		// iterate through each result page
+		for _, f := range result.Followers {
+			u := models.User{
+				ScreenName:     f.ScreenName,
+				FollowersCount: int64(f.FollowersCount),
+				URL:            f.URL,
+				Name:           f.Name,
+				Email:          f.Email,
+			}
+			users = append(users, u)
+
+		}
+		fmt.Printf("T")
+	}
+	return users
+}
+
+// GetUsers returns the users for the source.
+func (ts Twitter) GetUsers(ch models.UserSetChan, wg *sync.WaitGroup) {
+	defer wg.Done()
+
+	final := make([]models.User, 0, 1)
+	// FIXME: we don't deduplicate users across usernames yet.
+	for _, screenName := range ts.Conf.Usernames {
+		v := ts.getDefaultURLValues(screenName)
+
+		tFol := ts.getFollowers(v)
+		tFri := ts.getFriends(v)
+		final = append(final, tFol...)
+		final = append(final, tFri...)
+	}
+
+	ch <- models.NewUserSet("twitter", final)
+}
+
+func (ts Twitter) getDefaultURLValues(screenName string) url.Values {
+	v := url.Values{}
+	v.Add("screen_name", screenName)
+	v.Add("include_user_entities", "false")
+	v.Add("count", "200")
+	return v
+}
diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS
new file mode 100644
index 0000000..c364af1
--- /dev/null
+++ b/vendor/cloud.google.com/go/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization
+# The email address is not required for organizations.
+
+Filippo Valsorda
+Google Inc.
+Ingo Oeser
+Palm Stone Games, Inc.
+Paweł Knap
+Péter Szilágyi
+Tyler Treat
diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS
new file mode 100644
index 0000000..3b3cbed
--- /dev/null
+++ b/vendor/cloud.google.com/go/CONTRIBUTORS
@@ -0,0 +1,40 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name
+
+# Keep the list alphabetically sorted.
+
+Alexis Hunt
+Andreas Litt
+Andrew Gerrand
+Brad Fitzpatrick
+Burcu Dogan
+Dave Day
+David Sansome
+David Symonds
+Filippo Valsorda
+Glenn Lewis
+Ingo Oeser
+James Hall
+Johan Euphrosine
+Jonathan Amsterdam
+Kunpei Sakai
+Luna Duclos
+Magnus Hiie
+Mario Castro
+Michael McGreevy
+Omar Jarjur
+Paweł Knap
+Péter Szilágyi
+Sarah Adams
+Thanatat Tamtan
+Toby Burress
+Tuo Shan
+Tyler Treat
diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/cloud.google.com/go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 0000000..0d929a6 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,501 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. 
+ if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". 
+func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. 
+ host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. 
The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/.appveyor.yml b/vendor/github.com/ChimeraCoder/anaconda/.appveyor.yml new file mode 100644 index 0000000..28ea7a4 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/.appveyor.yml @@ -0,0 +1,8 @@ +clone_folder: c:\gopath\src\github.com\ChimeraCoder\anaconda + +environment: + GOPATH: c:\gopath + +build_script: + - go get . 
+ - go test -race -v -timeout 120s diff --git a/vendor/github.com/ChimeraCoder/anaconda/.gitignore b/vendor/github.com/ChimeraCoder/anaconda/.gitignore new file mode 100644 index 0000000..db912a1 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/.gitignore @@ -0,0 +1,6 @@ +*.swp +*.swo +*.swn +conf.sh +*.patch +anaconda.test diff --git a/vendor/github.com/ChimeraCoder/anaconda/.travis.yml b/vendor/github.com/ChimeraCoder/anaconda/.travis.yml new file mode 100644 index 0000000..55a8da7 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/.travis.yml @@ -0,0 +1,36 @@ +language: go + +os: + - linux + - osx + +install: + # Use gofmt from Go 1.9 for the pre-build check on all builds + - if [ "$TRAVIS_OS_NAME" = "osx" ]; then wget -O go.tar.gz https://storage.googleapis.com/golang/go1.9.darwin-amd64.tar.gz; else wget -O go.tar.gz https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz; fi + - tar -C /tmp -xvf go.tar.gz go/bin/gofmt + - rm go.tar.gz + +before_script: + - /tmp/go/bin/gofmt -w . + + # If `go generate` or `gofmt` yielded any changes, + # this will fail with an error message like "too many arguments" + # or "M: binary operator expected" and show the diff. + - git diff + - git add . + - git diff-index --cached --exit-code HEAD + +go: + - 1.7 + - 1.8 + - 1.9 + - tip + +matrix: + include: + - os: linux + go: 1.6 + +script: + - echo $TRAVIS_GO_VERSION + - if [ "$TRAVIS_GO_VERSION" == "1.6" ] || [ "$TRAVIS_GO_VERSION" == "1.7" ] || [ "$TRAVIS_GO_VERSION" == "1.8" ]; then go list ./... | grep -v vendor | xargs go test -race -v -timeout 60s; else go test -race -v -timeout 60s ./...; fi diff --git a/vendor/github.com/ChimeraCoder/anaconda/COPYING b/vendor/github.com/ChimeraCoder/anaconda/COPYING new file mode 120000 index 0000000..7a694c9 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/COPYING @@ -0,0 +1 @@ +LICENSE \ No newline at end of file diff --git a/vendor/github.com/ChimeraCoder/anaconda/Gopkg.lock b/vendor/github.com/ChimeraCoder/anaconda/Gopkg.lock new file mode 100644 index 0000000..4b438c7 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/Gopkg.lock @@ -0,0 +1,39 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + branch = "master" + name = "github.com/ChimeraCoder/tokenbucket" + packages = ["."] + revision = "c5a927568de7aad8a58127d80bcd36ca4e71e454" + +[[projects]] + branch = "master" + name = "github.com/azr/backoff" + packages = ["."] + revision = "53511d3c733003985b0b76f733df1f4d0095ee6a" + +[[projects]] + branch = "master" + name = "github.com/dustin/go-jsonpointer" + packages = ["."] + revision = "ba0abeacc3dcca5b9b20f31509c46794edbc9965" + +[[projects]] + branch = "master" + name = "github.com/dustin/gojson" + packages = ["."] + revision = "2e71ec9dd5adce3b168cd0dbde03b5cc04951c30" + +[[projects]] + branch = "master" + name = "github.com/garyburd/go-oauth" + packages = ["oauth"] + revision = "166ce8d672783fbb5a72247c3cf459267717e1ec" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e645e975b86556d43a1fb9a6aacbaa500a8549d3262d8421baca41f04ae42f4f" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/ChimeraCoder/anaconda/Gopkg.toml b/vendor/github.com/ChimeraCoder/anaconda/Gopkg.toml new file mode 100644 index 0000000..2f9f929 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/Gopkg.toml @@ -0,0 +1,38 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/ChimeraCoder/tokenbucket" + +[[constraint]] + branch = "master" + name = "github.com/azr/backoff" + +[[constraint]] + branch = "master" + name = "github.com/dustin/go-jsonpointer" + +[[constraint]] + branch = "master" + name = "github.com/garyburd/go-oauth" diff --git a/vendor/github.com/ChimeraCoder/anaconda/LICENSE b/vendor/github.com/ChimeraCoder/anaconda/LICENSE new file mode 100644 index 0000000..bb01fc7 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2013 Aditya Mukerjee, Quotidian Ventures + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE diff --git a/vendor/github.com/ChimeraCoder/anaconda/README b/vendor/github.com/ChimeraCoder/anaconda/README new file mode 100644 index 0000000..0cd411e --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/README @@ -0,0 +1,110 @@ +Anaconda +==================== + +[![Build Status](https://travis-ci.org/ChimeraCoder/anaconda.svg?branch=master)](https://travis-ci.org/ChimeraCoder/anaconda) [![Build Status](https://ci.appveyor.com/api/projects/status/63pi6csod8bps80i/branch/master?svg=true)](https://ci.appveyor.com/project/ChimeraCoder/anaconda/branch/master) [![GoDoc](https://godoc.org/github.com/ChimeraCoder/anaconda?status.svg)](https://godoc.org/github.com/ChimeraCoder/anaconda) + +Anaconda is a simple, transparent Go package for accessing version 1.1 of the Twitter API. + +Successful API queries return native Go structs that can be used immediately, with no need for type assertions. + + + +Examples +------------- + +### Authentication + +If you already have the access token (and secret) for your user (Twitter provides this for your own account on the developer portal), creating the client is simple: + +````go +api := anaconda.NewTwitterApiWithCredentials("your-access-token", "your-access-token-secret", "your-consumer-key", "your-consumer-secret") +```` + +### Queries + +Queries are conducted using a pointer to an authenticated `TwitterApi` struct. In v1.1 of Twitter's API, all requests should be authenticated. + +````go +searchResult, _ := api.GetSearch("golang", nil) +for _ , tweet := range searchResult.Statuses { + fmt.Println(tweet.Text) +} +```` +Certain endpoints allow separate optional parameter; if desired, these can be passed as the final parameter. + +````go +//Perhaps we want 30 values instead of the default 15 +v := url.Values{} +v.Set("count", "30") +result, err := api.GetSearch("golang", v) +```` + +(Remember that `url.Values` is equivalent to a `map[string][]string`, if you find that more convenient notation when specifying values). Otherwise, `nil` suffices. + + + +Endpoints +------------ + +Anaconda implements most of the endpoints defined in the [Twitter API documentation](https://developer.twitter.com/en/docs). For clarity, in most cases, the function name is simply the name of the HTTP method and the endpoint (e.g., the endpoint `GET /friendships/incoming` is provided by the function `GetFriendshipsIncoming`). + +In a few cases, a shortened form has been chosen to make life easier (for example, retweeting is simply the function `Retweet`) + + + +Error Handling, Rate Limiting, and Throttling +--------------------------------- + +### Error Handling + +Twitter errors are returned as an `ApiError`, which satisfies the `error` interface and can be treated as a vanilla `error`. However, it also contains the additional information returned by the Twitter API that may be useful in deciding how to proceed after encountering an error. + + +If you make queries too quickly, you may bump against Twitter's [rate limits](https://developer.twitter.com/en/docs/basics/rate-limits). If this happens, `anaconda` automatically retries the query when the rate limit resets, using the `X-Rate-Limit-Reset` header that Twitter provides to determine how long to wait. 
+ +In other words, users of the `anaconda` library should not need to handle rate limiting errors themselves; this is handled seamlessly behind-the-scenes. If an error is returned by a function, another form of error must have occurred (which can be checked by using the fields provided by the `ApiError` struct). + + +(If desired, this feature can be turned off by calling `ReturnRateLimitError(true)`.) + + +### Throttling + +Anaconda now supports automatic client-side throttling of queries to avoid hitting the Twitter rate-limit. + +This is currently *off* by default; however, it may be turned on by default in future versions of the library, as the implementation is improved. + + +To set a delay between queries, use the `SetDelay` method: + +````go + api.SetDelay(10 * time.Second) +```` + +Delays are set specific to each `TwitterApi` struct, so queries that use different users' access credentials are completely independent. + + +To turn off automatic throttling, set the delay to `0`: + +````go + api.SetDelay(0 * time.Second) +```` + +### Query Queue Persistence + +If your code creates a NewTwitterApi in a regularly called function, you'll need to call `.Close()` on the API struct to clear the queryQueue and allow the goroutine to exit. Otherwise you could see goroutine and therefor heap memory leaks in long-running applications. + +### Google App Engine + +Since Google App Engine doesn't make the standard `http.Transport` available, it's necessary to tell Anaconda to use a different client context. + +````go + api = anaconda.NewTwitterApi("", "") + c := appengine.NewContext(r) + api.HttpClient.Transport = &urlfetch.Transport{Context: c} +```` + + +License +----------- +Anaconda is free software licensed under the MIT/X11 license. Details provided in the LICENSE file. diff --git a/vendor/github.com/ChimeraCoder/anaconda/README.md b/vendor/github.com/ChimeraCoder/anaconda/README.md new file mode 120000 index 0000000..100b938 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/README.md @@ -0,0 +1 @@ +README \ No newline at end of file diff --git a/vendor/github.com/ChimeraCoder/anaconda/account.go b/vendor/github.com/ChimeraCoder/anaconda/account.go new file mode 100644 index 0000000..aba335f --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/account.go @@ -0,0 +1,22 @@ +package anaconda + +import ( + "net/url" +) + +// Verify the credentials by making a very small request +func (a TwitterApi) VerifyCredentials() (ok bool, err error) { + v := cleanValues(nil) + v.Set("include_entities", "false") + v.Set("skip_status", "true") + + _, err = a.GetSelf(v) + return err == nil, err +} + +// Get the user object for the authenticated user. Requests /account/verify_credentials +func (a TwitterApi) GetSelf(v url.Values) (u User, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account/verify_credentials.json", v, &u, _GET, response_ch} + return u, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/backoff.go b/vendor/github.com/ChimeraCoder/anaconda/backoff.go new file mode 100644 index 0000000..17d7d9e --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/backoff.go @@ -0,0 +1,45 @@ +package anaconda + +import ( + "time" + + "github.com/azr/backoff" +) + +/* +Reconnecting(from https://developer.twitter.com/en/docs/tutorials/consuming-streaming-data) : + +Once an established connection drops, attempt to reconnect immediately. 
+If the reconnect fails, slow down your reconnect attempts according to the type of error experienced: +*/ + +//Back off linearly for TCP/IP level network errors. +// These problems are generally temporary and tend to clear quickly. +// Increase the delay in reconnects by 250ms each attempt, up to 16 seconds. +func NewTCPIPErrBackoff() backoff.Interface { + return backoff.NewLinear(0, time.Second*16, time.Millisecond*250, 1) +} + +//Back off exponentially for HTTP errors for which reconnecting would be appropriate. +// Start with a 5 second wait, doubling each attempt, up to 320 seconds. +func NewHTTPErrBackoff() backoff.Interface { + eb := backoff.NewExponential() + eb.InitialInterval = time.Second * 5 + eb.MaxInterval = time.Second * 320 + eb.Multiplier = 2 + eb.Reset() + return eb +} + +// Back off exponentially for HTTP 420 errors. +// Start with a 1 minute wait and double each attempt. +// Note that every HTTP 420 received increases the time you must +// wait until rate limiting will no longer will be in effect for your account. +func NewHTTP420ErrBackoff() backoff.Interface { + eb := backoff.NewExponential() + eb.InitialInterval = time.Minute * 1 + eb.Multiplier = 2 + eb.MaxInterval = time.Minute * 20 + eb.Reset() + return eb +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/blocks.go b/vendor/github.com/ChimeraCoder/anaconda/blocks.go new file mode 100644 index 0000000..ac4843f --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/blocks.go @@ -0,0 +1,54 @@ +package anaconda + +import ( + "net/url" + "strconv" +) + +func (a TwitterApi) GetBlocksList(v url.Values) (c UserCursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/blocks/list.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) GetBlocksIds(v url.Values) (c Cursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/blocks/ids.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) BlockUser(screenName string, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("screen_name", screenName) + return a.Block(v) +} + +func (a TwitterApi) BlockUserId(id int64, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + return a.Block(v) +} + +func (a TwitterApi) Block(v url.Values) (user User, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/blocks/create.json", v, &user, _POST, response_ch} + return user, (<-response_ch).err +} + +func (a TwitterApi) UnblockUser(screenName string, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("screen_name", screenName) + return a.Unblock(v) +} + +func (a TwitterApi) UnblockUserId(id int64, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + return a.Unblock(v) +} + +func (a TwitterApi) Unblock(v url.Values) (user User, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/blocks/destroy.json", v, &user, _POST, response_ch} + return user, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/configuration.go b/vendor/github.com/ChimeraCoder/anaconda/configuration.go new file mode 100644 index 0000000..e854a58 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/configuration.go @@ -0,0 +1,32 @@ +package anaconda + +import ( + "net/url" +) + +type 
Configuration struct { + CharactersReservedPerMedia int `json:"characters_reserved_per_media"` + MaxMediaPerUpload int `json:"max_media_per_upload"` + NonUsernamePaths []string `json:"non_username_paths"` + PhotoSizeLimit int `json:"photo_size_limit"` + PhotoSizes struct { + Thumb photoSize `json:"thumb"` + Small photoSize `json:"small"` + Medium photoSize `json:"medium"` + Large photoSize `json:"large"` + } `json:"photo_sizes"` + ShortUrlLength int `json:"short_url_length"` + ShortUrlLengthHttps int `json:"short_url_length_https"` +} + +type photoSize struct { + H int `json:"h"` + W int `json:"w"` + Resize string `json:"resize"` +} + +func (a TwitterApi) GetConfiguration(v url.Values) (conf Configuration, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/help/configuration.json", v, &conf, _GET, response_ch} + return conf, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/directmessage.go b/vendor/github.com/ChimeraCoder/anaconda/directmessage.go new file mode 100644 index 0000000..ef14c11 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/directmessage.go @@ -0,0 +1,15 @@ +package anaconda + +type DirectMessage struct { + CreatedAt string `json:"created_at"` + Entities Entities `json:"entities"` + Id int64 `json:"id"` + IdStr string `json:"id_str"` + Recipient User `json:"recipient"` + RecipientId int64 `json:"recipient_id"` + RecipientScreenName string `json:"recipient_screen_name"` + Sender User `json:"sender"` + SenderId int64 `json:"sender_id"` + SenderScreenName string `json:"sender_screen_name"` + Text string `json:"text"` +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/directmessages.go b/vendor/github.com/ChimeraCoder/anaconda/directmessages.go new file mode 100644 index 0000000..95c1859 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/directmessages.go @@ -0,0 +1,57 @@ +package anaconda + +import ( + "net/url" + "strconv" +) + +func (a TwitterApi) GetDirectMessages(v url.Values) (messages []DirectMessage, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/direct_messages.json", v, &messages, _GET, response_ch} + return messages, (<-response_ch).err +} + +func (a TwitterApi) GetDirectMessagesSent(v url.Values) (messages []DirectMessage, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/direct_messages/sent.json", v, &messages, _GET, response_ch} + return messages, (<-response_ch).err +} + +func (a TwitterApi) GetDirectMessagesShow(v url.Values) (message DirectMessage, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/direct_messages/show.json", v, &message, _GET, response_ch} + return message, (<-response_ch).err +} + +// https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-message +func (a TwitterApi) PostDMToScreenName(text, screenName string) (message DirectMessage, err error) { + v := url.Values{} + v.Set("screen_name", screenName) + v.Set("text", text) + return a.postDirectMessagesImpl(v) +} + +// https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-message +func (a TwitterApi) PostDMToUserId(text string, userId int64) (message DirectMessage, err error) { + v := url.Values{} + v.Set("user_id", strconv.FormatInt(userId, 10)) + v.Set("text", text) + return a.postDirectMessagesImpl(v) +} + +// DeleteDirectMessage will destroy (delete) the direct 
message with the specified ID. +// https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/delete-message +func (a TwitterApi) DeleteDirectMessage(id int64, includeEntities bool) (message DirectMessage, err error) { + v := url.Values{} + v.Set("id", strconv.FormatInt(id, 10)) + v.Set("include_entities", strconv.FormatBool(includeEntities)) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/direct_messages/destroy.json", v, &message, _POST, response_ch} + return message, (<-response_ch).err +} + +func (a TwitterApi) postDirectMessagesImpl(v url.Values) (message DirectMessage, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/direct_messages/new.json", v, &message, _POST, response_ch} + return message, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/errors.go b/vendor/github.com/ChimeraCoder/anaconda/errors.go new file mode 100644 index 0000000..9acf82a --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/errors.go @@ -0,0 +1,114 @@ +package anaconda + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +const ( + //Error code defintions match the Twitter documentation + //https://developer.twitter.com/en/docs/basics/response-codes + TwitterErrorCouldNotAuthenticate = 32 + TwitterErrorDoesNotExist = 34 + TwitterErrorAccountSuspended = 64 + TwitterErrorApi1Deprecation = 68 //This should never be needed + TwitterErrorRateLimitExceeded = 88 + TwitterErrorInvalidToken = 89 + TwitterErrorOverCapacity = 130 + TwitterErrorInternalError = 131 + TwitterErrorCouldNotAuthenticateYou = 135 + TwitterErrorStatusIsADuplicate = 187 + TwitterErrorBadAuthenticationData = 215 + TwitterErrorUserMustVerifyLogin = 231 + + // Undocumented by Twitter, but may be returned instead of 34 + TwitterErrorDoesNotExist2 = 144 +) + +type ApiError struct { + StatusCode int + Header http.Header + Body string + Decoded TwitterErrorResponse + URL *url.URL +} + +func newApiError(resp *http.Response) *ApiError { + // TODO don't ignore this error + // TODO don't use ReadAll + p, _ := ioutil.ReadAll(resp.Body) + + var twitterErrorResp TwitterErrorResponse + _ = json.Unmarshal(p, &twitterErrorResp) + return &ApiError{ + StatusCode: resp.StatusCode, + Header: resp.Header, + Body: string(p), + Decoded: twitterErrorResp, + URL: resp.Request.URL, + } +} + +// ApiError supports the error interface +func (aerr ApiError) Error() string { + return fmt.Sprintf("Get %s returned status %d, %s", aerr.URL, aerr.StatusCode, aerr.Body) +} + +// Check to see if an error is a Rate Limiting error. If so, find the next available window in the header. 
+// Use like so: +// +// if aerr, ok := err.(*ApiError); ok { +// if isRateLimitError, nextWindow := aerr.RateLimitCheck(); isRateLimitError { +// <-time.After(nextWindow.Sub(time.Now())) +// } +// } +// +func (aerr *ApiError) RateLimitCheck() (isRateLimitError bool, nextWindow time.Time) { + // TODO check for error code 130, which also signifies a rate limit + if aerr.StatusCode == 429 { + if reset := aerr.Header.Get("X-Rate-Limit-Reset"); reset != "" { + if resetUnix, err := strconv.ParseInt(reset, 10, 64); err == nil { + resetTime := time.Unix(resetUnix, 0) + // Reject any time greater than an hour away + if resetTime.Sub(time.Now()) > time.Hour { + return true, time.Now().Add(15 * time.Minute) + } + + return true, resetTime + } + } + } + + return false, time.Time{} +} + +//TwitterErrorResponse has an array of Twitter error messages +//It satisfies the "error" interface +//For the most part, Twitter seems to return only a single error message +//Currently, we assume that this always contains exactly one error message +type TwitterErrorResponse struct { + Errors []TwitterError `json:"errors"` +} + +func (tr TwitterErrorResponse) First() error { + return tr.Errors[0] +} + +func (tr TwitterErrorResponse) Error() string { + return tr.Errors[0].Message +} + +//TwitterError represents a single Twitter error messages/code pair +type TwitterError struct { + Message string `json:"message"` + Code int `json:"code"` +} + +func (te TwitterError) Error() string { + return te.Message +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/favorites.go b/vendor/github.com/ChimeraCoder/anaconda/favorites.go new file mode 100644 index 0000000..96f19ee --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/favorites.go @@ -0,0 +1,11 @@ +package anaconda + +import ( + "net/url" +) + +func (a TwitterApi) GetFavorites(v url.Values) (favorites []Tweet, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/favorites/list.json", v, &favorites, _GET, response_ch} + return favorites, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/friends_followers.go b/vendor/github.com/ChimeraCoder/anaconda/friends_followers.go new file mode 100644 index 0000000..ec0ba71 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/friends_followers.go @@ -0,0 +1,289 @@ +package anaconda + +import ( + "net/url" + "strconv" +) + +type Cursor struct { + Previous_cursor int64 + Previous_cursor_str string + + Ids []int64 + + Next_cursor int64 + Next_cursor_str string +} + +type UserCursor struct { + Previous_cursor int64 + Previous_cursor_str string + Next_cursor int64 + Next_cursor_str string + Users []User +} + +type FriendsIdsCursor struct { + Previous_cursor int64 + Previous_cursor_str string + Next_cursor int64 + Next_cursor_str string + Ids []int64 +} + +type FriendsIdsPage struct { + Ids []int64 + Error error +} + +type Friendship struct { + Name string + Id_str string + Id int64 + Connections []string + Screen_name string +} + +type FollowersPage struct { + Followers []User + Error error +} + +type FriendsPage struct { + Friends []User + Error error +} + +// FIXME: Might want to consolidate this with FriendsIdsPage and just +// have "UserIdsPage". +type FollowersIdsPage struct { + Ids []int64 + Error error +} + +// GetFriendshipsNoRetweets returns a collection of user_ids that the currently authenticated user does not want to receive retweets from. +// It does not currently support the stringify_ids parameter. 
+func (a TwitterApi) GetFriendshipsNoRetweets() (ids []int64, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/no_retweets/ids.json", nil, &ids, _GET, response_ch} + return ids, (<-response_ch).err +} + +func (a TwitterApi) GetFollowersIds(v url.Values) (c Cursor, err error) { + err = a.apiGet(a.baseUrl+"/followers/ids.json", v, &c) + return +} + +// Like GetFollowersIds, but returns a channel instead of a cursor and pre-fetches the remaining results +// This channel is closed once all values have been fetched +func (a TwitterApi) GetFollowersIdsAll(v url.Values) (result chan FollowersIdsPage) { + result = make(chan FollowersIdsPage) + + v = cleanValues(v) + go func(a TwitterApi, v url.Values, result chan FollowersIdsPage) { + // Cursor defaults to the first page ("-1") + next_cursor := "-1" + for { + v.Set("cursor", next_cursor) + c, err := a.GetFollowersIds(v) + + // throttledQuery() handles all rate-limiting errors + // if GetFollowersList() returns an error, it must be a different kind of error + + result <- FollowersIdsPage{c.Ids, err} + + next_cursor = c.Next_cursor_str + if err != nil || next_cursor == "0" { + close(result) + break + } + } + }(a, v, result) + return result +} + +func (a TwitterApi) GetFriendsIds(v url.Values) (c Cursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friends/ids.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) GetFriendshipsLookup(v url.Values) (friendships []Friendship, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/lookup.json", v, &friendships, _GET, response_ch} + return friendships, (<-response_ch).err +} + +func (a TwitterApi) GetFriendshipsIncoming(v url.Values) (c Cursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/incoming.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) GetFriendshipsOutgoing(v url.Values) (c Cursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/outgoing.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) GetFollowersList(v url.Values) (c UserCursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/followers/list.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) GetFriendsList(v url.Values) (c UserCursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friends/list.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +// Like GetFriendsList, but returns a channel instead of a cursor and pre-fetches the remaining results +// This channel is closed once all values have been fetched +func (a TwitterApi) GetFriendsListAll(v url.Values) (result chan FriendsPage) { + result = make(chan FriendsPage) + + v = cleanValues(v) + go func(a TwitterApi, v url.Values, result chan FriendsPage) { + // Cursor defaults to the first page ("-1") + next_cursor := "-1" + for { + v.Set("cursor", next_cursor) + c, err := a.GetFriendsList(v) + + // throttledQuery() handles all rate-limiting errors + // if GetFriendsListAll() returns an error, it must be a different kind of error + + result <- FriendsPage{c.Users, err} + + next_cursor = c.Next_cursor_str + if err != nil || next_cursor == "0" { + close(result) + break + } + } + }(a, v, result) + 
return result +} + +// Like GetFollowersList, but returns a channel instead of a cursor and pre-fetches the remaining results +// This channel is closed once all values have been fetched +func (a TwitterApi) GetFollowersListAll(v url.Values) (result chan FollowersPage) { + result = make(chan FollowersPage) + + v = cleanValues(v) + go func(a TwitterApi, v url.Values, result chan FollowersPage) { + // Cursor defaults to the first page ("-1") + next_cursor := "-1" + for { + v.Set("cursor", next_cursor) + c, err := a.GetFollowersList(v) + + // throttledQuery() handles all rate-limiting errors + // if GetFollowersList() returns an error, it must be a different kind of error + + result <- FollowersPage{c.Users, err} + + next_cursor = c.Next_cursor_str + if err != nil || next_cursor == "0" { + close(result) + break + } + } + }(a, v, result) + return result +} + +func (a TwitterApi) GetFollowersUser(id int64, v url.Values) (c Cursor, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/followers/ids.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +// Like GetFriendsIds, but returns a channel instead of a cursor and pre-fetches the remaining results +// This channel is closed once all values have been fetched +func (a TwitterApi) GetFriendsIdsAll(v url.Values) (result chan FriendsIdsPage) { + result = make(chan FriendsIdsPage) + + v = cleanValues(v) + go func(a TwitterApi, v url.Values, result chan FriendsIdsPage) { + // Cursor defaults to the first page ("-1") + next_cursor := "-1" + for { + v.Set("cursor", next_cursor) + c, err := a.GetFriendsIds(v) + + // throttledQuery() handles all rate-limiting errors + // if GetFollowersList() returns an error, it must be a different kind of error + + result <- FriendsIdsPage{c.Ids, err} + + next_cursor = c.Next_cursor_str + if err != nil || next_cursor == "0" { + close(result) + break + } + } + }(a, v, result) + return result +} + +func (a TwitterApi) GetFriendsUser(id int64, v url.Values) (c Cursor, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friends/ids.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +// FollowUserId follows the user with the specified userId. +// This implements the /friendships/create endpoint, though the function name +// uses the terminology 'follow' as this is most consistent with colloquial Twitter terminology. +func (a TwitterApi) FollowUserId(userId int64, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(userId, 10)) + return a.postFriendshipsCreateImpl(v) +} + +// FollowUserId follows the user with the specified screenname (username). +// This implements the /friendships/create endpoint, though the function name +// uses the terminology 'follow' as this is most consistent with colloquial Twitter terminology. +func (a TwitterApi) FollowUser(screenName string) (user User, err error) { + v := url.Values{} + v.Set("screen_name", screenName) + return a.postFriendshipsCreateImpl(v) +} + +func (a TwitterApi) postFriendshipsCreateImpl(v url.Values) (user User, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/create.json", v, &user, _POST, response_ch} + return user, (<-response_ch).err +} + +// UnfollowUserId unfollows the user with the specified userId. 
+// This implements the /friendships/destroy endpoint, though the function name +// uses the terminology 'unfollow' as this is most consistent with colloquial Twitter terminology. +func (a TwitterApi) UnfollowUserId(userId int64) (u User, err error) { + v := url.Values{} + v.Set("user_id", strconv.FormatInt(userId, 10)) + // Set other values before calling this method: + // page, count, include_entities + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/destroy.json", v, &u, _POST, response_ch} + return u, (<-response_ch).err +} + +// UnfollowUser unfollows the user with the specified screenname (username) +// This implements the /friendships/destroy endpoint, though the function name +// uses the terminology 'unfollow' as this is most consistent with colloquial Twitter terminology. +func (a TwitterApi) UnfollowUser(screenname string) (u User, err error) { + v := url.Values{} + v.Set("screen_name", screenname) + // Set other values before calling this method: + // page, count, include_entities + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/destroy.json", v, &u, _POST, response_ch} + return u, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/geosearch.go b/vendor/github.com/ChimeraCoder/anaconda/geosearch.go new file mode 100644 index 0000000..5900714 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/geosearch.go @@ -0,0 +1,57 @@ +package anaconda + +import "net/url" + +type GeoSearchResult struct { + Result struct { + Places []struct { + ID string `json:"id"` + URL string `json:"url"` + PlaceType string `json:"place_type"` + Name string `json:"name"` + FullName string `json:"full_name"` + CountryCode string `json:"country_code"` + Country string `json:"country"` + ContainedWithin []struct { + ID string `json:"id"` + URL string `json:"url"` + PlaceType string `json:"place_type"` + Name string `json:"name"` + FullName string `json:"full_name"` + CountryCode string `json:"country_code"` + Country string `json:"country"` + Centroid []float64 `json:"centroid"` + BoundingBox struct { + Type string `json:"type"` + Coordinates [][][]float64 `json:"coordinates"` + } `json:"bounding_box"` + Attributes struct { + } `json:"attributes"` + } `json:"contained_within"` + Centroid []float64 `json:"centroid"` + BoundingBox struct { + Type string `json:"type"` + Coordinates [][][]float64 `json:"coordinates"` + } `json:"bounding_box"` + Attributes struct { + } `json:"attributes"` + } `json:"places"` + } `json:"result"` + Query struct { + URL string `json:"url"` + Type string `json:"type"` + Params struct { + Accuracy float64 `json:"accuracy"` + Granularity string `json:"granularity"` + Query string `json:"query"` + Autocomplete bool `json:"autocomplete"` + TrimPlace bool `json:"trim_place"` + } `json:"params"` + } `json:"query"` +} + +func (a TwitterApi) GeoSearch(v url.Values) (c GeoSearchResult, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/geo/search.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/list.go b/vendor/github.com/ChimeraCoder/anaconda/list.go new file mode 100644 index 0000000..9a9cabc --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/list.go @@ -0,0 +1,26 @@ +package anaconda + +type ListResponse struct { + PreviousCursor int `json:"previous_cursor"` + NextCursor int `json:"next_cursor"` + Lists []List 
`json:"lists"` +} + +type AddUserToListResponse struct { + Users []User `json:"users"` +} + +type List struct { + Slug string `json:"slug"` + Name string `json:"name"` + URL string `json:"uri"` + CreatedAt string `json:"created_at"` + Id int64 `json:"id"` + SubscriberCount int64 `json:"subscriber_count"` + MemberCount int64 `json:"member_count"` + Mode string `json:"mode"` + FullName string `json:"full_name"` + Description string `json:"description"` + User User `json:"user"` + Following bool `json:"following"` +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/lists.go b/vendor/github.com/ChimeraCoder/anaconda/lists.go new file mode 100644 index 0000000..985cfc5 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/lists.go @@ -0,0 +1,87 @@ +package anaconda + +import ( + "net/url" + "strconv" + "strings" +) + +// CreateList implements /lists/create.json +func (a TwitterApi) CreateList(name, description string, v url.Values) (list List, err error) { + v = cleanValues(v) + v.Set("name", name) + v.Set("description", description) + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/lists/create.json", v, &list, _POST, response_ch} + return list, (<-response_ch).err +} + +// AddUserToList implements /lists/members/create.json +func (a TwitterApi) AddUserToList(screenName string, listID int64, v url.Values) (users []User, err error) { + v = cleanValues(v) + v.Set("list_id", strconv.FormatInt(listID, 10)) + v.Set("screen_name", screenName) + + var addUserToListResponse AddUserToListResponse + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/lists/members/create.json", v, &addUserToListResponse, _POST, response_ch} + return addUserToListResponse.Users, (<-response_ch).err +} + +// AddMultipleUsersToList implements /lists/members/create_all.json +func (a TwitterApi) AddMultipleUsersToList(screenNames []string, listID int64, v url.Values) (list List, err error) { + v = cleanValues(v) + v.Set("list_id", strconv.FormatInt(listID, 10)) + v.Set("screen_name", strings.Join(screenNames, ",")) + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/lists/members/create_all.json", v, &list, _POST, response_ch} + r := <-response_ch + return list, r.err +} + +// GetListsOwnedBy implements /lists/ownerships.json +// screen_name, count, and cursor are all optional values +func (a TwitterApi) GetListsOwnedBy(userID int64, v url.Values) (lists []List, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(userID, 10)) + + var listResponse ListResponse + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/lists/ownerships.json", v, &listResponse, _GET, response_ch} + return listResponse.Lists, (<-response_ch).err +} + +func (a TwitterApi) GetListTweets(listID int64, includeRTs bool, v url.Values) (tweets []Tweet, err error) { + v = cleanValues(v) + v.Set("list_id", strconv.FormatInt(listID, 10)) + v.Set("include_rts", strconv.FormatBool(includeRTs)) + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/lists/statuses.json", v, &tweets, _GET, response_ch} + return tweets, (<-response_ch).err +} + +// GetList implements /lists/show.json +func (a TwitterApi) GetList(listID int64, v url.Values) (list List, err error) { + v = cleanValues(v) + v.Set("list_id", strconv.FormatInt(listID, 10)) + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/lists/show.json", v, &list, _GET, response_ch} + return list, (<-response_ch).err +} + 
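+// The following usage sketch is illustrative only; it simply chains the
+// exported methods defined in this file and assumes api is an
+// already-authenticated TwitterApi value. As with the other endpoints,
+// optional parameters are passed through url.Values.
+//
+//	list, err := api.CreateList("gophers", "People writing Go", nil)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err = api.AddUserToList("golang", list.Id, nil); err != nil {
+//		return err
+//	}
+//	v := url.Values{}
+//	v.Set("count", "50")
+//	_, err = api.GetListTweets(list.Id, false, v) // newest 50 tweets, excluding retweets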
+func (a TwitterApi) GetListTweetsBySlug(slug string, ownerScreenName string, includeRTs bool, v url.Values) (tweets []Tweet, err error) { + v = cleanValues(v) + v.Set("slug", slug) + v.Set("owner_screen_name", ownerScreenName) + v.Set("include_rts", strconv.FormatBool(includeRTs)) + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/lists/statuses.json", v, &tweets, _GET, response_ch} + return tweets, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/log.go b/vendor/github.com/ChimeraCoder/anaconda/log.go new file mode 100644 index 0000000..86b541c --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/log.go @@ -0,0 +1,91 @@ +package anaconda + +import ( + "log" + "os" +) + +// The Logger interface provides optional logging ability for the streaming API. +// It can also be used to log the rate limiting headers if desired. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + + // Log functions + Critical(args ...interface{}) + Criticalf(format string, args ...interface{}) + + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + + Warning(args ...interface{}) + Warningf(format string, args ...interface{}) + + Notice(args ...interface{}) + Noticef(format string, args ...interface{}) + + Info(args ...interface{}) + Infof(format string, args ...interface{}) + + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) +} + +// SetLogger sets the Logger used by the API client. +// The default logger is silent. BasicLogger will log to STDERR +// using the log package from the standard library. +func (c *TwitterApi) SetLogger(l Logger) { + c.Log = l +} + +type silentLogger struct { +} + +func (_ silentLogger) Fatal(_ ...interface{}) {} +func (_ silentLogger) Fatalf(_ string, _ ...interface{}) {} +func (_ silentLogger) Panic(_ ...interface{}) {} +func (_ silentLogger) Panicf(_ string, _ ...interface{}) {} +func (_ silentLogger) Critical(_ ...interface{}) {} +func (_ silentLogger) Criticalf(_ string, _ ...interface{}) {} +func (_ silentLogger) Error(_ ...interface{}) {} +func (_ silentLogger) Errorf(_ string, _ ...interface{}) {} +func (_ silentLogger) Warning(_ ...interface{}) {} +func (_ silentLogger) Warningf(_ string, _ ...interface{}) {} +func (_ silentLogger) Notice(_ ...interface{}) {} +func (_ silentLogger) Noticef(_ string, _ ...interface{}) {} +func (_ silentLogger) Info(_ ...interface{}) {} +func (_ silentLogger) Infof(_ string, _ ...interface{}) {} +func (_ silentLogger) Debug(_ ...interface{}) {} +func (_ silentLogger) Debugf(format string, _ ...interface{}) {} + +// BasicLogger is the equivalent of using log from the standard +// library to print to STDERR. +var BasicLogger Logger + +type basicLogger struct { + log *log.Logger //func New(out io.Writer, prefix string, flag int) *Logger +} + +func init() { + BasicLogger = &basicLogger{log: log.New(os.Stderr, log.Prefix(), log.LstdFlags)} +} + +func (l basicLogger) Fatal(items ...interface{}) { l.log.Fatal(items...) } +func (l basicLogger) Fatalf(s string, items ...interface{}) { l.log.Fatalf(s, items...) } +func (l basicLogger) Panic(items ...interface{}) { l.log.Panic(items...) } +func (l basicLogger) Panicf(s string, items ...interface{}) { l.log.Panicf(s, items...) } +func (l basicLogger) Critical(items ...interface{}) { l.log.Print(items...) 
} +func (l basicLogger) Criticalf(s string, items ...interface{}) { l.log.Printf(s, items...) } +func (l basicLogger) Error(items ...interface{}) { l.log.Print(items...) } +func (l basicLogger) Errorf(s string, items ...interface{}) { l.log.Printf(s, items...) } +func (l basicLogger) Warning(items ...interface{}) { l.log.Print(items...) } +func (l basicLogger) Warningf(s string, items ...interface{}) { l.log.Printf(s, items...) } +func (l basicLogger) Notice(items ...interface{}) { l.log.Print(items...) } +func (l basicLogger) Noticef(s string, items ...interface{}) { l.log.Printf(s, items...) } +func (l basicLogger) Info(items ...interface{}) { l.log.Print(items...) } +func (l basicLogger) Infof(s string, items ...interface{}) { l.log.Printf(s, items...) } +func (l basicLogger) Debug(items ...interface{}) { l.log.Print(items...) } +func (l basicLogger) Debugf(s string, items ...interface{}) { l.log.Printf(s, items...) } diff --git a/vendor/github.com/ChimeraCoder/anaconda/media.go b/vendor/github.com/ChimeraCoder/anaconda/media.go new file mode 100644 index 0000000..ed44859 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/media.go @@ -0,0 +1,89 @@ +package anaconda + +import ( + "net/url" + "strconv" +) + +type Media struct { + MediaID int64 `json:"media_id"` + MediaIDString string `json:"media_id_string"` + Size int `json:"size"` + Image Image `json:"image"` +} + +type Image struct { + W int `json:"w"` + H int `json:"h"` + ImageType string `json:"image_type"` +} + +type ChunkedMedia struct { + MediaID int64 `json:"media_id"` + MediaIDString string `json:"media_id_string"` + ExpiresAfterSecs int `json:"expires_after_secs"` +} + +type Video struct { + VideoType string `json:"video_type"` +} + +type VideoMedia struct { + MediaID int64 `json:"media_id"` + MediaIDString string `json:"media_id_string"` + Size int `json:"size"` + ExpiresAfterSecs int `json:"expires_after_secs"` + Video Video `json:"video"` +} + +func (a TwitterApi) UploadMedia(base64String string) (media Media, err error) { + v := url.Values{} + v.Set("media_data", base64String) + + var mediaResponse Media + + response_ch := make(chan response) + a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &mediaResponse, _POST, response_ch} + return mediaResponse, (<-response_ch).err +} + +func (a TwitterApi) UploadVideoInit(totalBytes int, mimeType string) (chunkedMedia ChunkedMedia, err error) { + v := url.Values{} + v.Set("command", "INIT") + v.Set("media_type", mimeType) + v.Set("total_bytes", strconv.FormatInt(int64(totalBytes), 10)) + + var mediaResponse ChunkedMedia + + response_ch := make(chan response) + a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &mediaResponse, _POST, response_ch} + return mediaResponse, (<-response_ch).err +} + +func (a TwitterApi) UploadVideoAppend(mediaIdString string, + segmentIndex int, base64String string) error { + + v := url.Values{} + v.Set("command", "APPEND") + v.Set("media_id", mediaIdString) + v.Set("media_data", base64String) + v.Set("segment_index", strconv.FormatInt(int64(segmentIndex), 10)) + + var emptyResponse interface{} + + response_ch := make(chan response) + a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &emptyResponse, _POST, response_ch} + return (<-response_ch).err +} + +func (a TwitterApi) UploadVideoFinalize(mediaIdString string) (videoMedia VideoMedia, err error) { + v := url.Values{} + v.Set("command", "FINALIZE") + v.Set("media_id", mediaIdString) + + var mediaResponse VideoMedia + + response_ch := 
make(chan response) + a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &mediaResponse, _POST, response_ch} + return mediaResponse, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/mutes.go b/vendor/github.com/ChimeraCoder/anaconda/mutes.go new file mode 100644 index 0000000..46ee0ac --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/mutes.go @@ -0,0 +1,54 @@ +package anaconda + +import ( + "net/url" + "strconv" +) + +func (a TwitterApi) GetMutedUsersList(v url.Values) (c UserCursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/mutes/users/list.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) GetMutedUsersIds(v url.Values) (c Cursor, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/mutes/users/ids.json", v, &c, _GET, response_ch} + return c, (<-response_ch).err +} + +func (a TwitterApi) MuteUser(screenName string, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("screen_name", screenName) + return a.Mute(v) +} + +func (a TwitterApi) MuteUserId(id int64, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + return a.Mute(v) +} + +func (a TwitterApi) Mute(v url.Values) (user User, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/mutes/users/create.json", v, &user, _POST, response_ch} + return user, (<-response_ch).err +} + +func (a TwitterApi) UnmuteUser(screenName string, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("screen_name", screenName) + return a.Unmute(v) +} + +func (a TwitterApi) UnmuteUserId(id int64, v url.Values) (user User, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + return a.Unmute(v) +} + +func (a TwitterApi) Unmute(v url.Values) (user User, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/mutes/users/destroy.json", v, &user, _POST, response_ch} + return user, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/oembed.go b/vendor/github.com/ChimeraCoder/anaconda/oembed.go new file mode 100644 index 0000000..1011caf --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/oembed.go @@ -0,0 +1,59 @@ +package anaconda + +import ( + "net/http" + "net/url" + "strconv" +) + +type OEmbed struct { + Type string + Width int + Cache_age string + Height int + Author_url string + Html string + Version string + Provider_name string + Provider_url string + Url string + Author_name string +} + +// No authorization on this endpoint. Its the only one. +func (a TwitterApi) GetOEmbed(v url.Values) (o OEmbed, err error) { + resp, err := http.Get(a.baseUrlV1() + "/statuses/oembed.json?" + v.Encode()) + if err != nil { + return + } + defer resp.Body.Close() + + err = decodeResponse(resp, &o) + return +} + +// Calls GetOEmbed with the corresponding id. Convenience wrapper for GetOEmbed() +func (a TwitterApi) GetOEmbedId(id int64, v url.Values) (o OEmbed, err error) { + v = cleanValues(v) + v.Set("id", strconv.FormatInt(id, 10)) + resp, err := http.Get(a.baseUrlV1() + "/statuses/oembed.json?" 
+ v.Encode()) + if err != nil { + return + } + defer resp.Body.Close() + + err = decodeResponse(resp, &o) + return +} + +func (a TwitterApi) baseUrlV1() string { + if a.baseUrl == BaseUrl { + return BaseUrlV1 + } + + if a.baseUrl == "" { + return BaseUrlV1 + } + + return a.baseUrl +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/place.go b/vendor/github.com/ChimeraCoder/anaconda/place.go new file mode 100644 index 0000000..86d3bfd --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/place.go @@ -0,0 +1,35 @@ +package anaconda + +type Place struct { + Attributes map[string]string `json:"attributes"` + BoundingBox struct { + Coordinates [][][]float64 `json:"coordinates"` + Type string `json:"type"` + } `json:"bounding_box"` + ContainedWithin []struct { + Attributes map[string]string `json:"attributes"` + BoundingBox struct { + Coordinates [][][]float64 `json:"coordinates"` + Type string `json:"type"` + } `json:"bounding_box"` + Country string `json:"country"` + CountryCode string `json:"country_code"` + FullName string `json:"full_name"` + ID string `json:"id"` + Name string `json:"name"` + PlaceType string `json:"place_type"` + URL string `json:"url"` + } `json:"contained_within"` + Country string `json:"country"` + CountryCode string `json:"country_code"` + FullName string `json:"full_name"` + Geometry struct { + Coordinates [][][]float64 `json:"coordinates"` + Type string `json:"type"` + } `json:"geometry"` + ID string `json:"id"` + Name string `json:"name"` + PlaceType string `json:"place_type"` + Polylines []string `json:"polylines"` + URL string `json:"url"` +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/rate_limit_status.go b/vendor/github.com/ChimeraCoder/anaconda/rate_limit_status.go new file mode 100644 index 0000000..ba7dc43 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/rate_limit_status.go @@ -0,0 +1,30 @@ +package anaconda + +import ( + "net/url" + "strings" +) + +type RateLimitStatusResponse struct { + RateLimitContext RateLimitContext `json:"rate_limit_context"` + Resources map[string]map[string]BaseResource `json:"resources"` +} + +type RateLimitContext struct { + AccessToken string `json:"access_token"` +} + +type BaseResource struct { + Limit int `json:"limit"` + Remaining int `json:"remaining"` + Reset int `json:"reset"` +} + +func (a TwitterApi) GetRateLimits(r []string) (rateLimitStatusResponse RateLimitStatusResponse, err error) { + resources := strings.Join(r, ",") + v := url.Values{} + v.Set("resources", resources) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/application/rate_limit_status.json", v, &rateLimitStatusResponse, _GET, response_ch} + return rateLimitStatusResponse, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/relationship.go b/vendor/github.com/ChimeraCoder/anaconda/relationship.go new file mode 100644 index 0000000..c53b68a --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/relationship.go @@ -0,0 +1,40 @@ +package anaconda + +import ( + "net/url" +) + +type RelationshipResponse struct { + Relationship Relationship `json:"relationship"` +} +type Relationship struct { + Target Target `json:"target"` + Source Source `json:"source"` +} +type Target struct { + Id int64 `json:"id"` + Id_str string `json:"id_str"` + Screen_name string `json:"screen_name"` + Following bool `json:"following"` + Followed_by bool `json:"followed_by"` +} +type Source struct { + Id int64 + Id_str string + 
Screen_name string + Following bool + Followed_by bool + Can_dm bool + Blocking bool + Muting bool + Marked_spam bool + All_replies bool + Want_retweets bool + Notifications_enabled bool +} + +func (a TwitterApi) GetFriendshipsShow(v url.Values) (relationshipResponse RelationshipResponse, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/friendships/show.json", v, &relationshipResponse, _GET, response_ch} + return relationshipResponse, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/search.go b/vendor/github.com/ChimeraCoder/anaconda/search.go new file mode 100644 index 0000000..9853349 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/search.go @@ -0,0 +1,57 @@ +package anaconda + +import ( + "net/url" +) + +type SearchMetadata struct { + CompletedIn float32 `json:"completed_in"` + MaxId int64 `json:"max_id"` + MaxIdString string `json:"max_id_str"` + Query string `json:"query"` + RefreshUrl string `json:"refresh_url"` + Count int `json:"count"` + SinceId int64 `json:"since_id"` + SinceIdString string `json:"since_id_str"` + NextResults string `json:"next_results"` +} + +type SearchResponse struct { + Statuses []Tweet `json:"statuses"` + Metadata SearchMetadata `json:"search_metadata"` +} + +func (sr *SearchResponse) GetNext(a *TwitterApi) (SearchResponse, error) { + if sr.Metadata.NextResults == "" { + return SearchResponse{}, nil + } + nextUrl, err := url.Parse(sr.Metadata.NextResults) + if err != nil { + return SearchResponse{}, err + } + + v := nextUrl.Query() + // remove the q parameter from the url.Values so that it + // can be added back via the next GetSearch method call. + delete(v, "q") + + q, _ := url.QueryUnescape(sr.Metadata.Query) + if err != nil { + return SearchResponse{}, err + } + newSr, err := a.GetSearch(q, v) + return newSr, err +} + +func (a TwitterApi) GetSearch(queryString string, v url.Values) (sr SearchResponse, err error) { + v = cleanValues(v) + v.Set("q", queryString) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/search/tweets.json", v, &sr, _GET, response_ch} + + // We have to read from the response channel before assigning to timeline + // Otherwise this will happen before the responses have been written + resp := <-response_ch + err = resp.err + return sr, err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/streaming.go b/vendor/github.com/ChimeraCoder/anaconda/streaming.go new file mode 100644 index 0000000..5703e16 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/streaming.go @@ -0,0 +1,318 @@ +package anaconda + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/dustin/go-jsonpointer" +) + +const ( + BaseUrlUserStream = "https://userstream.twitter.com/1.1" + BaseUrlSiteStream = "https://sitestream.twitter.com/1.1" + BaseUrlStream = "https://stream.twitter.com/1.1" +) + +// messages + +type StatusDeletionNotice struct { + Id int64 `json:"id"` + IdStr string `json:"id_str"` + UserId int64 `json:"user_id"` + UserIdStr string `json:"user_id_str"` +} +type statusDeletionNotice struct { + Delete *struct { + Status *StatusDeletionNotice `json:"status"` + } `json:"delete"` +} + +type DirectMessageDeletionNotice struct { + Id int64 `json:"id"` + IdStr string `json:"id_str"` + UserId int64 `json:"user_id"` + UserIdStr string `json:"user_id_str"` +} + +type directMessageDeletionNotice struct { + Delete *struct { + DirectMessage 
*DirectMessageDeletionNotice `json:"direct_message"` + } `json:"delete"` +} + +type LocationDeletionNotice struct { + UserId int64 `json:"user_id"` + UserIdStr string `json:"user_id_str"` + UpToStatusId int64 `json:"up_to_status_id"` + UpToStatusIdStr string `json:"up_to_status_id_str"` +} +type locationDeletionNotice struct { + ScrubGeo *LocationDeletionNotice `json:"scrub_geo"` +} + +type LimitNotice struct { + Track int64 `json:"track"` +} +type limitNotice struct { + Limit *LimitNotice `json:"limit"` +} + +type StatusWithheldNotice struct { + Id int64 `json:"id"` + UserId int64 `json:"user_id"` + WithheldInCountries []string `json:"withheld_in_countries"` +} +type statusWithheldNotice struct { + StatusWithheld *StatusWithheldNotice `json:"status_withheld"` +} + +type UserWithheldNotice struct { + Id int64 `json:"id"` + WithheldInCountries []string `json:"withheld_in_countries"` +} +type userWithheldNotice struct { + UserWithheld *UserWithheldNotice `json:"user_withheld"` +} + +type DisconnectMessage struct { + Code int64 `json:"code"` + StreamName string `json:"stream_name"` + Reason string `json:"reason"` +} +type disconnectMessage struct { + Disconnect *DisconnectMessage `json:"disconnect"` +} + +type StallWarning struct { + Code string `json:"code"` + Message string `json:"message"` + PercentFull int64 `json:"percent_full"` +} +type stallWarning struct { + Warning *StallWarning `json:"warning"` +} + +type FriendsList []int64 +type friendsList struct { + Friends *FriendsList `json:"friends"` +} + +type streamDirectMessage struct { + DirectMessage *DirectMessage `json:"direct_message"` +} + +type Event struct { + Target *User `json:"target"` + Source *User `json:"source"` + Event string `json:"event"` + CreatedAt string `json:"created_at"` +} + +type EventList struct { + Event + TargetObject *List `json:"target_object"` +} + +type EventTweet struct { + Event + TargetObject *Tweet `json:"target_object"` +} + +type EventFollow struct { + Event +} + +type TooManyFollow struct { + Warning *struct { + Code string `json:"code"` + Message string `json:"message"` + UserId int64 `json:"user_id"` + } `json:"warning"` +} + +// TODO: Site Stream messages. I cant test. + +// Stream allows you to stream using one of the +// PublicStream* or UserStream api methods +// +// A go loop is started an gives you an stream that sends interface{} +// objects through it's chan C +// Objects which you can cast into a tweet like this : +// t, ok := o.(twitter.Tweet) // try casting into a tweet +// if !ok { +// log.Debug("Recieved non tweet message") +// } +// +// If we can't stream the chan will be closed. +// Otherwise the loop will connect and send streams in the chan. +// It will also try to reconnect itself after an exponential backoff time +// if the connection is lost +// If twitter response is one of 420, 429 or 503 (meaning "wait a sec") +// the loop retries to open the socket with a simple autogrowing backoff. +// +// When finished streaming call stream.Stop() to initiate termination process. +// + +type Stream struct { + api TwitterApi + C chan interface{} + run bool +} + +func (s *Stream) listen(response http.Response) { + if response.Body != nil { + defer response.Body.Close() + } + + s.api.Log.Notice("Listening to twitter socket") + defer s.api.Log.Notice("twitter socket closed, leaving loop") + + scanner := bufio.NewScanner(response.Body) + + for scanner.Scan() && s.run { + j := scanner.Bytes() + if len(j) == 0 { + s.api.Log.Debug("Empty bytes... 
Moving along") + } else { + s.C <- jsonToKnownType(j) + } + } +} + +func jsonToKnownType(j []byte) interface{} { + // TODO: DRY + if o := new(Tweet); jsonAsStruct(j, "/source", &o) { + return *o + } else if o := new(statusDeletionNotice); jsonAsStruct(j, "/delete/status", &o) { + return *o.Delete.Status + } else if o := new(directMessageDeletionNotice); jsonAsStruct(j, "/delete/direct_message", &o) { + return *o.Delete.DirectMessage + } else if o := new(locationDeletionNotice); jsonAsStruct(j, "/scrub_geo", &o) { + return *o.ScrubGeo + } else if o := new(limitNotice); jsonAsStruct(j, "/limit", &o) { + return *o.Limit + } else if o := new(statusWithheldNotice); jsonAsStruct(j, "/status_withheld", &o) { + return *o.StatusWithheld + } else if o := new(userWithheldNotice); jsonAsStruct(j, "/user_withheld", &o) { + return *o.UserWithheld + } else if o := new(disconnectMessage); jsonAsStruct(j, "/disconnect", &o) { + return *o.Disconnect + } else if o := new(stallWarning); jsonAsStruct(j, "/warning", &o) { + return *o.Warning + } else if o := new(friendsList); jsonAsStruct(j, "/friends", &o) { + return *o.Friends + } else if o := new(streamDirectMessage); jsonAsStruct(j, "/direct_message", &o) { + return *o.DirectMessage + } else if o := new(EventTweet); jsonAsStruct(j, "/target_object/source", &o) { + return *o + } else if o := new(EventList); jsonAsStruct(j, "/target_object/slug", &o) { + return *o + } else if o := new(Event); jsonAsStruct(j, "/target_object", &o) { + return *o + } else if o := new(EventFollow); jsonAsStruct(j, "/event", &o) { + return *o + } else { + return nil + } +} + +func (s *Stream) requestStream(urlStr string, v url.Values, method int) (resp *http.Response, err error) { + switch method { + case _GET: + return s.api.oauthClient.Get(s.api.HttpClient, s.api.Credentials, urlStr, v) + case _POST: + return s.api.oauthClient.Post(s.api.HttpClient, s.api.Credentials, urlStr, v) + default: + } + return nil, fmt.Errorf("HTTP method not yet supported") +} + +func (s *Stream) loop(urlStr string, v url.Values, method int) { + defer s.api.Log.Debug("Leaving request stream loop") + defer close(s.C) + + rlb := NewHTTP420ErrBackoff() + for s.run { + resp, err := s.requestStream(urlStr, v, method) + if err != nil { + if err == io.EOF { + // Sometimes twitter closes the stream + // right away with EOF as of a rate limit + resp.StatusCode = 420 + } else { + s.api.Log.Criticalf("Cannot request stream : %s", err) + return + } + } + s.api.Log.Debugf("Response status=%s code=%d", resp.Status, resp.StatusCode) + + switch resp.StatusCode { + case 200, 304: + s.listen(*resp) + rlb.Reset() + case 420, 429, 503: + s.api.Log.Noticef("Twitter streaming: backing off as got : %+s", resp.Status) + rlb.BackOff() + case 400, 401, 403, 404, 406, 410, 422, 500, 502, 504: + s.api.Log.Criticalf("Twitter streaming: leaving after an irremediable error: %+s", resp.Status) + return + default: + s.api.Log.Notice("Received unknown status: %+s", resp.StatusCode) + } + + } +} + +func (s *Stream) Stop() { + s.run = false +} + +func (s *Stream) start(urlStr string, v url.Values, method int) { + s.run = true + go s.loop(urlStr, v, method) +} + +func (a TwitterApi) newStream(urlStr string, v url.Values, method int) *Stream { + stream := Stream{ + api: a, + C: make(chan interface{}), + } + + stream.start(urlStr, v, method) + return &stream +} + +func (a TwitterApi) UserStream(v url.Values) (stream *Stream) { + return a.newStream(BaseUrlUserStream+"/user.json", v, _GET) +} + +func (a TwitterApi) PublicStreamSample(v 
url.Values) (stream *Stream) { + return a.newStream(BaseUrlStream+"/statuses/sample.json", v, _GET) +} + +// XXX: To use this API authority is requied. but I dont have this. I cant test. +func (a TwitterApi) PublicStreamFirehose(v url.Values) (stream *Stream) { + return a.newStream(BaseUrlStream+"/statuses/firehose.json", v, _GET) +} + +// XXX: PublicStream(Track|Follow|Locations) func is needed? +func (a TwitterApi) PublicStreamFilter(v url.Values) (stream *Stream) { + return a.newStream(BaseUrlStream+"/statuses/filter.json", v, _POST) +} + +// XXX: To use this API authority is requied. but I dont have this. I cant test. +func (a TwitterApi) SiteStream(v url.Values) (stream *Stream) { + return a.newStream(BaseUrlSiteStream+"/site.json", v, _GET) +} + +func jsonAsStruct(j []byte, path string, obj interface{}) (res bool) { + if v, _ := jsonpointer.Find(j, path); v == nil { + return false + } + err := json.Unmarshal(j, obj) + return err == nil +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/timeline.go b/vendor/github.com/ChimeraCoder/anaconda/timeline.go new file mode 100644 index 0000000..1b7d7e7 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/timeline.go @@ -0,0 +1,45 @@ +package anaconda + +import ( + "net/url" +) + +// GetHomeTimeline returns the most recent tweets and retweets posted by the user +// and the users that they follow. +// https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-home_timeline +// By default, include_entities is set to "true" +func (a TwitterApi) GetHomeTimeline(v url.Values) (timeline []Tweet, err error) { + v = cleanValues(v) + if val := v.Get("include_entities"); val == "" { + v.Set("include_entities", "true") + } + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/statuses/home_timeline.json", v, &timeline, _GET, response_ch} + return timeline, (<-response_ch).err +} + +// GetUserTimeline returns a collection of the most recent Tweets posted by the user indicated by the screen_name or user_id parameters. +// https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-user_timeline +func (a TwitterApi) GetUserTimeline(v url.Values) (timeline []Tweet, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/statuses/user_timeline.json", v, &timeline, _GET, response_ch} + return timeline, (<-response_ch).err +} + +// GetMentionsTimeline returns the most recent mentions (Tweets containing a users’s @screen_name) for the authenticating user. +// The timeline returned is the equivalent of the one seen when you view your mentions on twitter.com. +// https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-mentions_timeline +func (a TwitterApi) GetMentionsTimeline(v url.Values) (timeline []Tweet, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/statuses/mentions_timeline.json", v, &timeline, _GET, response_ch} + return timeline, (<-response_ch).err +} + +// GetRetweetsOfMe returns the most recent Tweets authored by the authenticating user that have been retweeted by others. 
+// https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-retweets_of_me +func (a TwitterApi) GetRetweetsOfMe(v url.Values) (tweets []Tweet, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/statuses/retweets_of_me.json", v, &tweets, _GET, response_ch} + return tweets, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/trends.go b/vendor/github.com/ChimeraCoder/anaconda/trends.go new file mode 100644 index 0000000..de1d466 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/trends.go @@ -0,0 +1,64 @@ +package anaconda + +import ( + "net/url" + "strconv" +) + +type Location struct { + Name string `json:"name"` + Woeid int `json:"woeid"` +} + +type Trend struct { + Name string `json:"name"` + Query string `json:"query"` + Url string `json:"url"` + PromotedContent string `json:"promoted_content"` +} + +type TrendResponse struct { + Trends []Trend `json:"trends"` + AsOf string `json:"as_of"` + CreatedAt string `json:"created_at"` + Locations []Location `json:"locations"` +} + +type TrendLocation struct { + Country string `json:"country"` + CountryCode string `json:"countryCode"` + Name string `json:"name"` + ParentId int `json:"parentid"` + PlaceType struct { + Code int `json:"code"` + Name string `json:"name"` + } `json:"placeType"` + Url string `json:"url"` + Woeid int32 `json:"woeid"` +} + +// https://developer.twitter.com/en/docs/trends/trends-for-location/api-reference/get-trends-place +func (a TwitterApi) GetTrendsByPlace(id int64, v url.Values) (trendResp TrendResponse, err error) { + response_ch := make(chan response) + v = cleanValues(v) + v.Set("id", strconv.FormatInt(id, 10)) + a.queryQueue <- query{a.baseUrl + "/trends/place.json", v, &[]interface{}{&trendResp}, _GET, response_ch} + return trendResp, (<-response_ch).err +} + +// https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-available +func (a TwitterApi) GetTrendsAvailableLocations(v url.Values) (locations []TrendLocation, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/trends/available.json", v, &locations, _GET, response_ch} + return locations, (<-response_ch).err +} + +// https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-closest +func (a TwitterApi) GetTrendsClosestLocations(lat float64, long float64, v url.Values) (locations []TrendLocation, err error) { + response_ch := make(chan response) + v = cleanValues(v) + v.Set("lat", strconv.FormatFloat(lat, 'f', 6, 64)) + v.Set("long", strconv.FormatFloat(long, 'f', 6, 64)) + a.queryQueue <- query{a.baseUrl + "/trends/closest.json", v, &locations, _GET, response_ch} + return locations, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/tweet.go b/vendor/github.com/ChimeraCoder/anaconda/tweet.go new file mode 100644 index 0000000..1f08ca2 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/tweet.go @@ -0,0 +1,154 @@ +package anaconda + +import ( + "encoding/json" + "fmt" + "time" +) + +type Tweet struct { + Contributors []int64 `json:"contributors"` + Coordinates *Coordinates `json:"coordinates"` + CreatedAt string `json:"created_at"` + DisplayTextRange []int `json:"display_text_range"` + Entities Entities `json:"entities"` + ExtendedEntities Entities `json:"extended_entities"` + ExtendedTweet ExtendedTweet `json:"extended_tweet"` + FavoriteCount int 
`json:"favorite_count"` + Favorited bool `json:"favorited"` + FilterLevel string `json:"filter_level"` + FullText string `json:"full_text"` + HasExtendedProfile bool `json:"has_extended_profile"` + Id int64 `json:"id"` + IdStr string `json:"id_str"` + InReplyToScreenName string `json:"in_reply_to_screen_name"` + InReplyToStatusID int64 `json:"in_reply_to_status_id"` + InReplyToStatusIdStr string `json:"in_reply_to_status_id_str"` + InReplyToUserID int64 `json:"in_reply_to_user_id"` + InReplyToUserIdStr string `json:"in_reply_to_user_id_str"` + IsTranslationEnabled bool `json:"is_translation_enabled"` + Lang string `json:"lang"` + Place Place `json:"place"` + QuotedStatusID int64 `json:"quoted_status_id"` + QuotedStatusIdStr string `json:"quoted_status_id_str"` + QuotedStatus *Tweet `json:"quoted_status"` + PossiblySensitive bool `json:"possibly_sensitive"` + PossiblySensitiveAppealable bool `json:"possibly_sensitive_appealable"` + RetweetCount int `json:"retweet_count"` + Retweeted bool `json:"retweeted"` + RetweetedStatus *Tweet `json:"retweeted_status"` + Source string `json:"source"` + Scopes map[string]interface{} `json:"scopes"` + Text string `json:"text"` + User User `json:"user"` + WithheldCopyright bool `json:"withheld_copyright"` + WithheldInCountries []string `json:"withheld_in_countries"` + WithheldScope string `json:"withheld_scope"` + + //Geo is deprecated + //Geo interface{} `json:"geo"` +} + +// CreatedAtTime is a convenience wrapper that returns the Created_at time, parsed as a time.Time struct +func (t Tweet) CreatedAtTime() (time.Time, error) { + return time.Parse(time.RubyDate, t.CreatedAt) +} + +// It may be worth placing these in an additional source file(s) + +// Could also use User, since the fields match, but only these fields are possible in Contributor +type Contributor struct { + Id int64 `json:"id"` + IdStr string `json:"id_str"` + ScreenName string `json:"screen_name"` +} + +type Coordinates struct { + Coordinates [2]float64 `json:"coordinates"` // Coordinate always has to have exactly 2 values + Type string `json:"type"` +} + +type ExtendedTweet struct { + FullText string `json:"full_text"` + DisplayTextRange []int `json:"display_text_range"` + Entities Entities `json:"entities"` + ExtendedEntities Entities `json:"extended_entities"` +} + +// HasCoordinates is a helper function to easily determine if a Tweet has coordinates associated with it +func (t Tweet) HasCoordinates() bool { + if t.Coordinates != nil { + if t.Coordinates.Type == "Point" { + return true + } + } + return false +} + +// The following provide convenience and eliviate confusion about the order of coordinates in the Tweet + +// Latitude is a convenience wrapper that returns the latitude easily +func (t Tweet) Latitude() (float64, error) { + if t.HasCoordinates() { + return t.Coordinates.Coordinates[1], nil + } + return 0, fmt.Errorf("No Coordinates in this Tweet") +} + +// Longitude is a convenience wrapper that returns the longitude easily +func (t Tweet) Longitude() (float64, error) { + if t.HasCoordinates() { + return t.Coordinates.Coordinates[0], nil + } + return 0, fmt.Errorf("No Coordinates in this Tweet") +} + +// X is a convenience wrapper which returns the X (Longitude) coordinate easily +func (t Tweet) X() (float64, error) { + return t.Longitude() +} + +// Y is a convenience wrapper which return the Y (Lattitude) corrdinate easily +func (t Tweet) Y() (float64, error) { + return t.Latitude() +} + +func (t *Tweet) extractExtendedTweet() { + // if the TruncatedText is set, the API does 
not return an extended tweet + // we need to manually set the Text in this case + if len(t.Text) > 0 && len(t.FullText) == 0 { + t.FullText = t.Text + } + + if len(t.ExtendedTweet.FullText) > 0 { + t.DisplayTextRange = t.ExtendedTweet.DisplayTextRange + t.Entities = t.ExtendedTweet.Entities + t.ExtendedEntities = t.ExtendedTweet.ExtendedEntities + t.FullText = t.ExtendedTweet.FullText + } + + // if the API supplied us with information how to extract the shortened + // text, extract it + if len(t.Text) == 0 && len(t.DisplayTextRange) == 2 { + t.Text = t.FullText[t.DisplayTextRange[0]:t.DisplayTextRange[1]] + } + // if the truncated text is still empty then full & truncated text are equal + if len(t.Text) == 0 { + t.Text = t.FullText + } +} + +func (t *Tweet) UnmarshalJSON(data []byte) error { + type Alias Tweet + aux := &struct { + *Alias + }{ + Alias: (*Alias)(t), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + t.extractExtendedTweet() + return nil +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/tweets.go b/vendor/github.com/ChimeraCoder/anaconda/tweets.go new file mode 100644 index 0000000..0baf4b2 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/tweets.go @@ -0,0 +1,107 @@ +package anaconda + +import ( + "fmt" + "net/url" + "strconv" +) + +func (a TwitterApi) GetTweet(id int64, v url.Values) (tweet Tweet, err error) { + v = cleanValues(v) + v.Set("id", strconv.FormatInt(id, 10)) + + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/statuses/show.json", v, &tweet, _GET, response_ch} + return tweet, (<-response_ch).err +} + +func (a TwitterApi) GetTweetsLookupByIds(ids []int64, v url.Values) (tweet []Tweet, err error) { + var pids string + for w, i := range ids { + pids += strconv.FormatInt(i, 10) + if w != len(ids)-1 { + pids += "," + } + } + v = cleanValues(v) + v.Set("id", pids) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/statuses/lookup.json", v, &tweet, _GET, response_ch} + return tweet, (<-response_ch).err +} + +func (a TwitterApi) GetRetweets(id int64, v url.Values) (tweets []Tweet, err error) { + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/retweets/%d.json", id), v, &tweets, _GET, response_ch} + return tweets, (<-response_ch).err +} + +//PostTweet will create a tweet with the specified status message +func (a TwitterApi) PostTweet(status string, v url.Values) (tweet Tweet, err error) { + v = cleanValues(v) + v.Set("status", status) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/statuses/update.json", v, &tweet, _POST, response_ch} + return tweet, (<-response_ch).err +} + +//DeleteTweet will destroy (delete) the status (tweet) with the specified ID, assuming that the authenticated user is the author of the status (tweet). +//If trimUser is set to true, only the user's Id will be provided in the user object returned. +func (a TwitterApi) DeleteTweet(id int64, trimUser bool) (tweet Tweet, err error) { + v := url.Values{} + if trimUser { + v.Set("trim_user", "t") + } + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/destroy/%d.json", id), v, &tweet, _POST, response_ch} + return tweet, (<-response_ch).err +} + +//Retweet will retweet the status (tweet) with the specified ID. 
+//trimUser functions as in DeleteTweet +func (a TwitterApi) Retweet(id int64, trimUser bool) (rt Tweet, err error) { + v := url.Values{} + if trimUser { + v.Set("trim_user", "t") + } + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/retweet/%d.json", id), v, &rt, _POST, response_ch} + return rt, (<-response_ch).err +} + +//UnRetweet removes (un-retweets) the authenticating user's retweet of the status (tweet) with the specified ID. +//Returns the original Tweet with retweet details embedded. +// +//https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-unretweet-id +//trim_user: tweet returned in a timeline will include a user object +//including only the status author's numerical ID. +func (a TwitterApi) UnRetweet(id int64, trimUser bool) (rt Tweet, err error) { + v := url.Values{} + if trimUser { + v.Set("trim_user", "t") + } + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/unretweet/%d.json", id), v, &rt, _POST, response_ch} + return rt, (<-response_ch).err +} + +// Favorite will favorite the status (tweet) with the specified ID. +// https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-favorites-create +func (a TwitterApi) Favorite(id int64) (rt Tweet, err error) { + v := url.Values{} + v.Set("id", fmt.Sprint(id)) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/favorites/create.json", v, &rt, _POST, response_ch} + return rt, (<-response_ch).err +} + +// Unfavorite un-favorites the status specified in the ID parameter as the authenticating user. +// Returns the un-favorited status in the requested format when successful. +// https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-favorites-destroy +func (a TwitterApi) Unfavorite(id int64) (rt Tweet, err error) { + v := url.Values{} + v.Set("id", fmt.Sprint(id)) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/favorites/destroy.json", v, &rt, _POST, response_ch} + return rt, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/twitter.go b/vendor/github.com/ChimeraCoder/anaconda/twitter.go new file mode 100644 index 0000000..3dc68ff --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/twitter.go @@ -0,0 +1,370 @@ +//Package anaconda provides structs and functions for accessing version 1.1 +//of the Twitter API. +// +//Successful API queries return native Go structs that can be used immediately, +//with no need for type assertions. +// +//Authentication +// +//If you already have the access token (and secret) for your user (Twitter provides this for your own account on the developer portal), creating the client is simple: +// +// anaconda.SetConsumerKey("your-consumer-key") +// anaconda.SetConsumerSecret("your-consumer-secret") +// api := anaconda.NewTwitterApi("your-access-token", "your-access-token-secret") +// +// +//Queries +// +//Executing queries on an authenticated TwitterApi struct is simple. +// +// searchResult, _ := api.GetSearch("golang", nil) +// for _, tweet := range searchResult.Statuses { +// fmt.Print(tweet.Text) +// } +// +//Certain endpoints accept additional optional parameters; if desired, these can be passed as the final argument. +// +// v := url.Values{} +// v.Set("count", "30") +// result, err := api.GetSearch("golang", v) +// +// +//Endpoints +// +//Anaconda implements most of the endpoints defined in the Twitter API documentation: https://dev.twitter.com/docs/api/1.1.
+//For clarity, in most cases, the function name is simply the name of the HTTP method and the endpoint (e.g., the endpoint `GET /friendships/incoming` is provided by the function `GetFriendshipsIncoming`). +// +//In a few cases, a shortened form has been chosen to make life easier (for example, retweeting is simply the function `Retweet`) +// +//More detailed information about the behavior of each particular endpoint can be found at the official Twitter API documentation. +package anaconda + +import ( + "compress/zlib" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/ChimeraCoder/tokenbucket" + "github.com/garyburd/go-oauth/oauth" +) + +const ( + _GET = iota + _POST = iota + _DELETE = iota + _PUT = iota + BaseUrlV1 = "https://api.twitter.com/1" + BaseUrl = "https://api.twitter.com/1.1" + UploadBaseUrl = "https://upload.twitter.com/1.1" +) + +var ( + oauthCredentials oauth.Credentials +) + +type TwitterApi struct { + oauthClient oauth.Client + Credentials *oauth.Credentials + queryQueue chan query + bucket *tokenbucket.Bucket + returnRateLimitError bool + HttpClient *http.Client + + // Currently used only for the streaming API + // and for checking rate-limiting headers + // Default logger is silent + Log Logger + + // used for testing + // defaults to BaseUrl + baseUrl string +} + +type query struct { + url string + form url.Values + data interface{} + method int + response_ch chan response +} + +type response struct { + data interface{} + err error +} + +const DEFAULT_DELAY = 0 * time.Second +const DEFAULT_CAPACITY = 5 + +//NewTwitterApi takes an user-specific access token and secret and returns a TwitterApi struct for that user. +//The TwitterApi struct can be used for accessing any of the endpoints available. +func NewTwitterApi(access_token string, access_token_secret string) *TwitterApi { + //TODO figure out how much to buffer this channel + //A non-buffered channel will cause blocking when multiple queries are made at the same time + queue := make(chan query) + c := &TwitterApi{ + oauthClient: oauth.Client{ + TemporaryCredentialRequestURI: "https://api.twitter.com/oauth/request_token", + ResourceOwnerAuthorizationURI: "https://api.twitter.com/oauth/authenticate", + TokenRequestURI: "https://api.twitter.com/oauth/access_token", + Credentials: oauthCredentials, + }, + Credentials: &oauth.Credentials{ + Token: access_token, + Secret: access_token_secret, + }, + queryQueue: queue, + bucket: nil, + returnRateLimitError: false, + HttpClient: http.DefaultClient, + Log: silentLogger{}, + baseUrl: BaseUrl, + } + go c.throttledQuery() + return c +} + +//NewTwitterApiWithCredentials takes an app-specific consumer key and secret, along with a user-specific access token and secret and returns a TwitterApi struct for that user. +//The TwitterApi struct can be used for accessing any of the endpoints available. 
+func NewTwitterApiWithCredentials(access_token string, access_token_secret string, consumer_key string, consumer_secret string) *TwitterApi { + api := NewTwitterApi(access_token, access_token_secret) + api.oauthClient.Credentials.Token = consumer_key + api.oauthClient.Credentials.Secret = consumer_secret + return api +} + +//SetConsumerKey will set the application-specific consumer_key used in the initial OAuth process +//This key is listed on https://dev.twitter.com/apps/YOUR_APP_ID/show +func SetConsumerKey(consumer_key string) { + oauthCredentials.Token = consumer_key +} + +//SetConsumerSecret will set the application-specific secret used in the initial OAuth process +//This secret is listed on https://dev.twitter.com/apps/YOUR_APP_ID/show +func SetConsumerSecret(consumer_secret string) { + oauthCredentials.Secret = consumer_secret +} + +// ReturnRateLimitError specifies behavior when the Twitter API returns a rate-limit error. +// If set to true, the query will fail and return the error instead of automatically queuing and +// retrying the query when the rate limit expires +func (c *TwitterApi) ReturnRateLimitError(b bool) { + c.returnRateLimitError = b +} + +// Enable query throttling using the tokenbucket algorithm +func (c *TwitterApi) EnableThrottling(rate time.Duration, bufferSize int64) { + c.bucket = tokenbucket.NewBucket(rate, bufferSize) +} + +// Disable query throttling +func (c *TwitterApi) DisableThrottling() { + c.bucket = nil +} + +// SetDelay will set the delay between throttled queries +// To turn of throttling, set it to 0 seconds +func (c *TwitterApi) SetDelay(t time.Duration) { + c.bucket.SetRate(t) +} + +func (c *TwitterApi) GetDelay() time.Duration { + return c.bucket.GetRate() +} + +// SetBaseUrl is experimental and may be removed in future releases. +func (c *TwitterApi) SetBaseUrl(baseUrl string) { + c.baseUrl = baseUrl +} + +//AuthorizationURL generates the authorization URL for the first part of the OAuth handshake. +//Redirect the user to this URL. +//This assumes that the consumer key has already been set (using SetConsumerKey or NewTwitterApiWithCredentials). +func (c *TwitterApi) AuthorizationURL(callback string) (string, *oauth.Credentials, error) { + tempCred, err := c.oauthClient.RequestTemporaryCredentials(http.DefaultClient, callback, nil) + if err != nil { + return "", nil, err + } + return c.oauthClient.AuthorizationURL(tempCred, nil), tempCred, nil +} + +// GetCredentials gets the access token using the verifier received with the callback URL and the +// credentials in the first part of the handshake. GetCredentials implements the third part of the OAuth handshake. +// The returned url.Values holds the access_token, the access_token_secret, the user_id and the screen_name. +func (c *TwitterApi) GetCredentials(tempCred *oauth.Credentials, verifier string) (*oauth.Credentials, url.Values, error) { + return c.oauthClient.RequestToken(http.DefaultClient, tempCred, verifier) +} + +func defaultValues(v url.Values) url.Values { + if v == nil { + v = url.Values{} + } + v.Set("tweet_mode", "extended") + return v +} + +func cleanValues(v url.Values) url.Values { + if v == nil { + return url.Values{} + } + return v +} + +// apiGet issues a GET request to the Twitter API and decodes the response JSON to data. 
+func (c TwitterApi) apiGet(urlStr string, form url.Values, data interface{}) error { + form = defaultValues(form) + resp, err := c.oauthClient.Get(c.HttpClient, c.Credentials, urlStr, form) + if err != nil { + return err + } + defer resp.Body.Close() + return decodeResponse(resp, data) +} + +// apiPost issues a POST request to the Twitter API and decodes the response JSON to data. +func (c TwitterApi) apiPost(urlStr string, form url.Values, data interface{}) error { + resp, err := c.oauthClient.Post(c.HttpClient, c.Credentials, urlStr, form) + if err != nil { + return err + } + defer resp.Body.Close() + return decodeResponse(resp, data) +} + +// apiDel issues a DELETE request to the Twitter API and decodes the response JSON to data. +func (c TwitterApi) apiDel(urlStr string, form url.Values, data interface{}) error { + resp, err := c.oauthClient.Delete(c.HttpClient, c.Credentials, urlStr, form) + if err != nil { + return err + } + defer resp.Body.Close() + return decodeResponse(resp, data) +} + +// apiPut issues a PUT request to the Twitter API and decodes the response JSON to data. +func (c TwitterApi) apiPut(urlStr string, form url.Values, data interface{}) error { + resp, err := c.oauthClient.Put(c.HttpClient, c.Credentials, urlStr, form) + if err != nil { + return err + } + defer resp.Body.Close() + return decodeResponse(resp, data) +} + +// decodeResponse decodes the JSON response from the Twitter API. +func decodeResponse(resp *http.Response, data interface{}) error { + // Prevent memory leak in the case where the Response.Body is not used. + // As per the net/http package, Response.Body still needs to be closed. + defer resp.Body.Close() + + // Twitter returns deflate data despite the client only requesting gzip + // data. net/http automatically handles the latter but not the former: + // https://github.com/golang/go/issues/18779 + if resp.Header.Get("Content-Encoding") == "deflate" { + var err error + resp.Body, err = zlib.NewReader(resp.Body) + if err != nil { + return err + } + } + + // according to dev.twitter.com, chunked upload append returns HTTP 2XX + // so we need a special case when decoding the response + if strings.HasSuffix(resp.Request.URL.String(), "upload.json") { + if resp.StatusCode == 204 { + // empty response, don't decode + return nil + } + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return newApiError(resp) + } + } else if resp.StatusCode != 200 { + return newApiError(resp) + } + return json.NewDecoder(resp.Body).Decode(data) +} + +func NewApiError(resp *http.Response) *ApiError { + body, _ := ioutil.ReadAll(resp.Body) + + return &ApiError{ + StatusCode: resp.StatusCode, + Header: resp.Header, + Body: string(body), + URL: resp.Request.URL, + } +} + +//execQuery executes a query against the specified url, sending the values specified by form, and decodes the response JSON into data. +//method can be one of _GET, _POST, _DELETE or _PUT; each is dispatched to the corresponding api* helper. +func (c TwitterApi) execQuery(urlStr string, form url.Values, data interface{}, method int) error { + switch method { + case _GET: + return c.apiGet(urlStr, form, data) + case _POST: + return c.apiPost(urlStr, form, data) + case _DELETE: + return c.apiDel(urlStr, form, data) + case _PUT: + return c.apiPut(urlStr, form, data) + default: + return fmt.Errorf("HTTP method not yet supported") + } +} + +// throttledQuery executes queries and automatically throttles them using the configured token bucket (if any). +// It is the only function that reads from the queryQueue for a particular *TwitterApi struct. +func (c *TwitterApi) throttledQuery() { + for
q := range c.queryQueue { + url := q.url + form := q.form + data := q.data //This is where the actual response will be written + method := q.method + + response_ch := q.response_ch + + if c.bucket != nil { + <-c.bucket.SpendToken(1) + } + + err := c.execQuery(url, form, data, method) + + // Check if Twitter returned a rate-limiting error + if err != nil { + if apiErr, ok := err.(*ApiError); ok { + if isRateLimitError, nextWindow := apiErr.RateLimitCheck(); isRateLimitError && !c.returnRateLimitError { + c.Log.Info(apiErr.Error()) + + // If this is a rate-limiting error, re-add the job to the queue + // TODO it really should preserve order + go func(q query) { + c.queryQueue <- q + }(q) + + delay := nextWindow.Sub(time.Now()) + <-time.After(delay) + + // Drain the bucket (start over fresh) + if c.bucket != nil { + c.bucket.Drain() + } + + continue + } + } + } + + response_ch <- response{data, err} + } +} + +// Close query queue +func (c *TwitterApi) Close() { + close(c.queryQueue) +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/twitter_entities.go b/vendor/github.com/ChimeraCoder/anaconda/twitter_entities.go new file mode 100644 index 0000000..c6b3565 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/twitter_entities.go @@ -0,0 +1,74 @@ +package anaconda + +type UrlEntity struct { + Urls []struct { + Indices []int `json:"indices"` + Url string `json:"url"` + Display_url string `json:"display_url"` + Expanded_url string `json:"expanded_url"` + } `json:"urls"` +} + +type Entities struct { + Urls []struct { + Indices []int `json:"indices"` + Url string `json:"url"` + Display_url string `json:"display_url"` + Expanded_url string `json:"expanded_url"` + } `json:"urls"` + Hashtags []struct { + Indices []int `json:"indices"` + Text string `json:"text"` + } `json:"hashtags"` + Url UrlEntity `json:"url"` + User_mentions []struct { + Name string `json:"name"` + Indices []int `json:"indices"` + Screen_name string `json:"screen_name"` + Id int64 `json:"id"` + Id_str string `json:"id_str"` + } `json:"user_mentions"` + Media []EntityMedia `json:"media"` +} + +type EntityMedia struct { + Id int64 `json:"id"` + Id_str string `json:"id_str"` + Media_url string `json:"media_url"` + Media_url_https string `json:"media_url_https"` + Url string `json:"url"` + Display_url string `json:"display_url"` + Expanded_url string `json:"expanded_url"` + Sizes MediaSizes `json:"sizes"` + Source_status_id int64 `json:"source_status_id"` + Source_status_id_str string `json:"source_status_id_str"` + Type string `json:"type"` + Indices []int `json:"indices"` + VideoInfo VideoInfo `json:"video_info"` + ExtAltText string `json:"ext_alt_text"` +} + +type MediaSizes struct { + Medium MediaSize `json:"medium"` + Thumb MediaSize `json:"thumb"` + Small MediaSize `json:"small"` + Large MediaSize `json:"large"` +} + +type MediaSize struct { + W int `json:"w"` + H int `json:"h"` + Resize string `json:"resize"` +} + +type VideoInfo struct { + AspectRatio []int `json:"aspect_ratio"` + DurationMillis int64 `json:"duration_millis"` + Variants []Variant `json:"variants"` +} + +type Variant struct { + Bitrate int `json:"bitrate"` + ContentType string `json:"content_type"` + Url string `json:"url"` +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/twitter_user.go b/vendor/github.com/ChimeraCoder/anaconda/twitter_user.go new file mode 100644 index 0000000..28cb500 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/twitter_user.go @@ -0,0 +1,53 @@ +package anaconda 
+ +type User struct { + ContributorsEnabled bool `json:"contributors_enabled"` + CreatedAt string `json:"created_at"` + DefaultProfile bool `json:"default_profile"` + DefaultProfileImage bool `json:"default_profile_image"` + Description string `json:"description"` + Email string `json:"email"` + Entities Entities `json:"entities"` + FavouritesCount int `json:"favourites_count"` + FollowRequestSent bool `json:"follow_request_sent"` + FollowersCount int `json:"followers_count"` + Following bool `json:"following"` + FriendsCount int `json:"friends_count"` + GeoEnabled bool `json:"geo_enabled"` + HasExtendedProfile bool `json:"has_extended_profile"` + Id int64 `json:"id"` + IdStr string `json:"id_str"` + IsTranslator bool `json:"is_translator"` + IsTranslationEnabled bool `json:"is_translation_enabled"` + Lang string `json:"lang"` // BCP-47 code of user defined language + ListedCount int64 `json:"listed_count"` + Location string `json:"location"` // User defined location + Name string `json:"name"` + Notifications bool `json:"notifications"` + ProfileBackgroundColor string `json:"profile_background_color"` + ProfileBackgroundImageURL string `json:"profile_background_image_url"` + ProfileBackgroundImageUrlHttps string `json:"profile_background_image_url_https"` + ProfileBackgroundTile bool `json:"profile_background_tile"` + ProfileBannerURL string `json:"profile_banner_url"` + ProfileImageURL string `json:"profile_image_url"` + ProfileImageUrlHttps string `json:"profile_image_url_https"` + ProfileLinkColor string `json:"profile_link_color"` + ProfileSidebarBorderColor string `json:"profile_sidebar_border_color"` + ProfileSidebarFillColor string `json:"profile_sidebar_fill_color"` + ProfileTextColor string `json:"profile_text_color"` + ProfileUseBackgroundImage bool `json:"profile_use_background_image"` + Protected bool `json:"protected"` + ScreenName string `json:"screen_name"` + ShowAllInlineMedia bool `json:"show_all_inline_media"` + Status *Tweet `json:"status"` // Only included if the user is a friend + StatusesCount int64 `json:"statuses_count"` + TimeZone string `json:"time_zone"` + URL string `json:"url"` + UtcOffset int `json:"utc_offset"` + Verified bool `json:"verified"` + WithheldInCountries []string `json:"withheld_in_countries"` + WithheldScope string `json:"withheld_scope"` +} + +// Provide language translator from BCP-47 to human readable format for Lang field? 
+// Available through golang.org/x/text/language, deserves further investigation diff --git a/vendor/github.com/ChimeraCoder/anaconda/users.go b/vendor/github.com/ChimeraCoder/anaconda/users.go new file mode 100644 index 0000000..d51f9c3 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/users.go @@ -0,0 +1,89 @@ +package anaconda + +import ( + "net/url" + "strconv" +) + +func (a TwitterApi) GetUsersLookup(usernames string, v url.Values) (u []User, err error) { + v = cleanValues(v) + v.Set("screen_name", usernames) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/lookup.json", v, &u, _GET, response_ch} + return u, (<-response_ch).err +} + +func (a TwitterApi) GetUsersLookupByIds(ids []int64, v url.Values) (u []User, err error) { + var pids string + for w, i := range ids { + //pids += strconv.Itoa(i) + pids += strconv.FormatInt(i, 10) + if w != len(ids)-1 { + pids += "," + } + } + v = cleanValues(v) + v.Set("user_id", pids) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/lookup.json", v, &u, _GET, response_ch} + return u, (<-response_ch).err +} + +func (a TwitterApi) GetUsersShow(username string, v url.Values) (u User, err error) { + v = cleanValues(v) + v.Set("screen_name", username) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/show.json", v, &u, _GET, response_ch} + return u, (<-response_ch).err +} + +func (a TwitterApi) GetUsersShowById(id int64, v url.Values) (u User, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/show.json", v, &u, _GET, response_ch} + return u, (<-response_ch).err +} + +func (a TwitterApi) GetUserSearch(searchTerm string, v url.Values) (u []User, err error) { + v = cleanValues(v) + v.Set("q", searchTerm) + // Set other values before calling this method: + // page, count, include_entities + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/search.json", v, &u, _GET, response_ch} + return u, (<-response_ch).err +} + +func (a TwitterApi) GetUsersSuggestions(v url.Values) (u []User, err error) { + v = cleanValues(v) + // Set other values before calling this method: + // page, count, include_entities + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/suggestions.json", v, &u, _GET, response_ch} + return u, (<-response_ch).err +} + +// PostUsersReportSpam : Reports and Blocks a User by screen_name +// Reference : https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-users-report_spam +// If you don't want to block the user you should add +// v.Set("perform_block", "false") +func (a TwitterApi) PostUsersReportSpam(username string, v url.Values) (u User, err error) { + v = cleanValues(v) + v.Set("screen_name", username) + response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/report_spam.json", v, &u, _POST, response_ch} + return u, (<-response_ch).err +} + +// PostUsersReportSpamById : Reports and Blocks a User by user_id +// Reference : https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-users-report_spam +// If you don't want to block the user you should add +// v.Set("perform_block", "false") +func (a TwitterApi) PostUsersReportSpamById(id int64, v url.Values) (u User, err error) { + v = cleanValues(v) + v.Set("user_id", strconv.FormatInt(id, 10)) + 
response_ch := make(chan response) + a.queryQueue <- query{a.baseUrl + "/users/report_spam.json", v, &u, _POST, response_ch} + return u, (<-response_ch).err +} diff --git a/vendor/github.com/ChimeraCoder/anaconda/webhook.go b/vendor/github.com/ChimeraCoder/anaconda/webhook.go new file mode 100644 index 0000000..8b492dd --- /dev/null +++ b/vendor/github.com/ChimeraCoder/anaconda/webhook.go @@ -0,0 +1,78 @@ +package anaconda + +import ( + "net/url" +) + +//GetActivityWebhooks retrieves the Twitter account_activity webhook configuration. +//Returns all URLs and their statuses for the given app. Currently, +//only one webhook URL can be registered to an application. +//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/get-webhook-config +func (a TwitterApi) GetActivityWebhooks(v url.Values) (u []WebHookResp, err error) { + responseCh := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks.json", v, &u, _GET, responseCh} + return u, (<-responseCh).err +} + +//WebHookResp represents the Get webhook responses +type WebHookResp struct { + ID string + URL string + Valid bool + CreatedAt string +} + +//SetActivityWebhooks registers a Twitter account_activity webhook. +//Registers a new webhook URL for the given application context. +//The URL will be validated via a CRC request before saving. In case the validation fails, +//a comprehensive error message is returned to the requester. +//Only one webhook URL can be registered to an application. +//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/new-webhook-config +func (a TwitterApi) SetActivityWebhooks(v url.Values) (u []WebHookResp, err error) { + responseCh := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks.json", v, &u, _POST, responseCh} + return u, (<-responseCh).err +} + +//DeleteActivityWebhooks removes the webhook from the provided application’s configuration. +//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/delete-webhook-config +func (a TwitterApi) DeleteActivityWebhooks(v url.Values, webhookID string) (u interface{}, err error) { + responseCh := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + ".json", v, &u, _DELETE, responseCh} + return u, (<-responseCh).err +} + +//PutActivityWebhooks updates the given webhook, re-enabling it by setting its status to valid. +//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/validate-webhook-config +func (a TwitterApi) PutActivityWebhooks(v url.Values, webhookID string) (u interface{}, err error) { + responseCh := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + ".json", v, &u, _PUT, responseCh} + return u, (<-responseCh).err +} + +//SetWHSubscription subscribes the provided app to events for the provided user context. +//When subscribed, all DM events for the provided user will be sent to the app’s webhook via POST request.
+//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/new-subscription +func (a TwitterApi) SetWHSubscription(v url.Values, webhookID string) (u interface{}, err error) { + responseCh := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + "/subscriptions.json", v, &u, _POST, responseCh} + return u, (<-responseCh).err +} + +//GetWHSubscription Provides a way to determine if a webhook configuration is +//subscribed to the provided user’s Direct Messages. +//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/get-subscription +func (a TwitterApi) GetWHSubscription(v url.Values, webhookID string) (u interface{}, err error) { + responseCh := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + "/subscriptions.json", v, &u, _GET, responseCh} + return u, (<-responseCh).err +} + +//DeleteWHSubscription Deactivates subscription for the provided user context and app. After deactivation, +//all DM events for the requesting user will no longer be sent to the webhook URL.. +//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/delete-subscription +func (a TwitterApi) DeleteWHSubscription(v url.Values, webhookID string) (u interface{}, err error) { + responseCh := make(chan response) + a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + "/subscriptions.json", v, &u, _DELETE, responseCh} + return u, (<-responseCh).err +} diff --git a/vendor/github.com/ChimeraCoder/tokenbucket/.gitignore b/vendor/github.com/ChimeraCoder/tokenbucket/.gitignore new file mode 100644 index 0000000..5947b74 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/tokenbucket/.gitignore @@ -0,0 +1,4 @@ +*.swp +*.swo +*.swn +conf.sh diff --git a/vendor/github.com/ChimeraCoder/tokenbucket/COPYING b/vendor/github.com/ChimeraCoder/tokenbucket/COPYING new file mode 100644 index 0000000..65c5ca8 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/tokenbucket/COPYING @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". 
+ + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
diff --git a/vendor/github.com/ChimeraCoder/tokenbucket/LICENSE b/vendor/github.com/ChimeraCoder/tokenbucket/LICENSE new file mode 120000 index 0000000..d24842f --- /dev/null +++ b/vendor/github.com/ChimeraCoder/tokenbucket/LICENSE @@ -0,0 +1 @@ +COPYING \ No newline at end of file diff --git a/vendor/github.com/ChimeraCoder/tokenbucket/README b/vendor/github.com/ChimeraCoder/tokenbucket/README new file mode 100644 index 0000000..e47bf17 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/tokenbucket/README @@ -0,0 +1,48 @@ +[![GoDoc](http://godoc.org/github.com/ChimeraCoder/tokenbucket?status.png)](http://godoc.org/github.com/ChimeraCoder/tokenbucket) + +tokenbucket +==================== + +This package provides an implementation of [Token bucket](https://en.wikipedia.org/wiki/Token_bucket) scheduling in Go. It is useful for implementing rate-limiting, traffic shaping, or other sorts of scheduling that depend on bandwidth constraints. + + +Example +------------ + + +To create a new bucket, specify a capacity (how many tokens can be stored "in the bank"), and a rate (how often a new token is added). + +````go + + // Create a new bucket + // Allow a new action every 5 seconds, with a maximum of 3 "in the bank" + bucket := tokenbucket.NewBucket(3, 5 * time.Second) +```` + +This bucket should be shared between any functions that share the same constraints. (These functions may or may not run in separate goroutines). + + +Anytime a regulated action is performed, spend a token. + +````go + // To perform a regulated action, we must spend a token + // RegulatedAction will not be performed until the bucket contains enough tokens + <-bucket.SpendToken(1) + RegulatedAction() +```` + +`SpendToken` returns immediately. Reading from the channel that it returns will block until the action has "permission" to continue (ie, until there are enough tokens in the bucket). + + +(The channel that `SpendToken` returns is of type `error`. For now, the value will always be `nil`, so it can be ignored.) + + + +####License + +`tokenbucket` is free software provided under version 3 of the LGPL license. + + +Software that uses `tokenbucket` may be released under *any* license, as long as the source code for `tokenbucket` (including any modifications) are made available under the LGPLv3 license. + +You do not need to release the rest of the software under the LGPL, or any free/open-source license, for that matter (though we would encourage you to do so!). 
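+
+A fuller, self-contained sketch of the same pattern (note that the `NewBucket` constructor defined in `tokenbucket.go` takes the rate first and the capacity second; the URL fetched below is only illustrative):
+
+````go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/ChimeraCoder/tokenbucket"
+)
+
+func main() {
+	// Allow roughly one request every 2 seconds, with up to 3 tokens banked.
+	bucket := tokenbucket.NewBucket(2*time.Second, 3)
+
+	for i := 0; i < 5; i++ {
+		// Block until a token is available, then perform the regulated action.
+		<-bucket.SpendToken(1)
+
+		resp, err := http.Get("https://example.com/") // illustrative endpoint
+		if err != nil {
+			fmt.Println("request failed:", err)
+			continue
+		}
+		resp.Body.Close()
+		fmt.Println("request", i, "returned", resp.Status)
+	}
+}
+````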
diff --git a/vendor/github.com/ChimeraCoder/tokenbucket/README.md b/vendor/github.com/ChimeraCoder/tokenbucket/README.md new file mode 120000 index 0000000..100b938 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/tokenbucket/README.md @@ -0,0 +1 @@ +README \ No newline at end of file diff --git a/vendor/github.com/ChimeraCoder/tokenbucket/tokenbucket.go b/vendor/github.com/ChimeraCoder/tokenbucket/tokenbucket.go new file mode 100644 index 0000000..215e7a7 --- /dev/null +++ b/vendor/github.com/ChimeraCoder/tokenbucket/tokenbucket.go @@ -0,0 +1,86 @@ +package tokenbucket + +import ( + "sync" + "time" +) + +type Bucket struct { + capacity int64 + tokens chan struct{} + rate time.Duration // Add a token to the bucket every 1/r units of time + rateMutex sync.Mutex +} + +func NewBucket(rate time.Duration, capacity int64) *Bucket { + + //A bucket is simply a channel with a buffer representing the maximum size + tokens := make(chan struct{}, capacity) + + b := &Bucket{capacity, tokens, rate, sync.Mutex{}} + + //Set off a function that will continuously add tokens to the bucket + go func(b *Bucket) { + ticker := time.NewTicker(rate) + for _ = range ticker.C { + b.tokens <- struct{}{} + } + }(b) + + return b +} + +func (b *Bucket) GetRate() time.Duration { + b.rateMutex.Lock() + tmp := b.rate + b.rateMutex.Unlock() + return tmp +} + +func (b *Bucket) SetRate(rate time.Duration) { + b.rateMutex.Lock() + b.rate = rate + b.rateMutex.Unlock() +} + +//AddTokens manually adds n tokens to the bucket +func (b *Bucket) AddToken(n int64) { +} + +func (b *Bucket) withdrawTokens(n int64) error { + for i := int64(0); i < n; i++ { + <-b.tokens + } + return nil +} + +func (b *Bucket) SpendToken(n int64) <-chan error { + // Default to spending a single token + if n < 0 { + n = 1 + } + + c := make(chan error) + go func(b *Bucket, n int64, c chan error) { + c <- b.withdrawTokens(n) + close(c) + return + }(b, n, c) + + return c +} + +// Drain will empty all tokens in the bucket +// If the tokens are being added too quickly (if the rate is too fast) +// this will never drain +func (b *Bucket) Drain() error{ + // TODO replace this with a more solid approach (such as replacing the channel altogether) + for { + select { + case _ = <-b.tokens: + continue + default: + return nil + } + } +} diff --git a/vendor/github.com/azr/backoff/.gitignore b/vendor/github.com/azr/backoff/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/azr/backoff/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/azr/backoff/.travis.yml b/vendor/github.com/azr/backoff/.travis.yml new file mode 100644 index 0000000..ce9cb62 --- /dev/null +++ b/vendor/github.com/azr/backoff/.travis.yml @@ -0,0 +1,2 @@ +language: go +go: 1.3.3 diff --git a/vendor/github.com/azr/backoff/LICENSE b/vendor/github.com/azr/backoff/LICENSE new file mode 100644 index 0000000..89b8179 --- /dev/null +++ b/vendor/github.com/azr/backoff/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation 
files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/azr/backoff/README.md b/vendor/github.com/azr/backoff/README.md new file mode 100644 index 0000000..42442eb --- /dev/null +++ b/vendor/github.com/azr/backoff/README.md @@ -0,0 +1,22 @@ +# backoff + +[![GoDoc](https://godoc.org/github.com/azr/backoff?status.png)](https://godoc.org/github.com/azr/backoff) +[![Build Status](https://travis-ci.org/azr/backoff.png)](https://travis-ci.org/azr/backoff) + +This is a fork from the awesome [cenkalti/backoff](github.com/cenkalti/backoff) which is a go port from +[google-http-java-client](https://code.google.com/p/google-http-java-client/wiki/ExponentialBackoff). + +This BackOff sleeps upon BackOff() and calculates its next backoff time instead of returning the duration to sleep. + +[Exponential backoff](http://en.wikipedia.org/wiki/Exponential_backoff) +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + + + +## Install + +```bash +go get github.com/azr/backoff +``` diff --git a/vendor/github.com/azr/backoff/backoff.go b/vendor/github.com/azr/backoff/backoff.go new file mode 100644 index 0000000..310db6e --- /dev/null +++ b/vendor/github.com/azr/backoff/backoff.go @@ -0,0 +1,51 @@ +//Package backoff helps you at backing off ! +// +//It was forked from github.com/cenkalti/backoff which is awesome. +// +//This BackOff sleeps upon BackOff() and calculates its next backoff time instead of returning the duration to sleep. +package backoff + +import "time" + +// Interface interface to use after a retryable operation failed. +// A Interface.BackOff sleeps. +type Interface interface { + // Example usage: + // + // for ;; { + // err, canRetry := somethingThatCanFail() + // if err != nil && canRetry { + // backoffer.Backoff() + // } + // } + BackOff() + + // Reset to initial state. + Reset() +} + +// ZeroBackOff is a fixed back-off policy whose back-off time is always zero, +// meaning that the operation is retried immediately without waiting. 
+type ZeroBackOff struct{} + +var _ Interface = (*ZeroBackOff)(nil) + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) BackOff() {} + +type ConstantBackOff struct { + Interval time.Duration +} + +var _ Interface = (*ConstantBackOff)(nil) + +func (b *ConstantBackOff) Reset() {} + +func (b *ConstantBackOff) BackOff() { + time.Sleep(b.Interval) +} + +func NewConstant(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/azr/backoff/exponential.go b/vendor/github.com/azr/backoff/exponential.go new file mode 100644 index 0000000..aa4cf8e --- /dev/null +++ b/vendor/github.com/azr/backoff/exponential.go @@ -0,0 +1,112 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is an implementation of BackOff that increases +it's back off period for each retry attempt using a randomization function +that grows exponentially. +Backoff() time is calculated using the following formula: + randomized_interval = + retry_interval * (random value in range [1 - randomization_factor, 1 + randomization_factor]) +In other words BackOff() will sleep for times between the randomization factor +percentage below and above the retry interval. +For example, using 2 seconds as the base retry interval and 0.5 as the +randomization factor, the actual back off period used in the next retry +attempt will be between 1 and 3 seconds. + +Note: max_interval caps the retry_interval and not the randomized_interval. + +Example: The default retry_interval is .5 seconds, default randomization_factor +is 0.5, default multiplier is 1.5 and the max_interval is set to 25 seconds. +For 12 tries the sequence will sleep (values in seconds) (output from ExampleExpBackOffTimes) : + + request# retry_interval randomized_interval + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.22 [9.611, 28.833] + 11 25 [12.5, 37.5] + 12 25 [12.5, 37.5] +Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + currentInterval time.Duration + MaxInterval time.Duration + + RandomizationFactor float64 + Multiplier float64 +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second +) + +// NewExponential creates an instance of ExponentialBackOff using default values. +func NewExponential() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + currentInterval: DefaultInitialInterval, + } + b.Reset() + return b +} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval +} + +func (b *ExponentialBackOff) GetSleepTime() time.Duration { + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +func (b *ExponentialBackOff) BackOff() { + time.Sleep(b.GetSleepTime()) + + b.IncrementCurrentInterval() +} + +// Increments the current interval by multiplying it with the multiplier. 
+func (b *ExponentialBackOff) IncrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +func (b *ExponentialBackOff) Inverval() time.Duration { + return b.currentInterval +} + +// Returns a random value from the interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/azr/backoff/linear.go b/vendor/github.com/azr/backoff/linear.go new file mode 100644 index 0000000..b887b77 --- /dev/null +++ b/vendor/github.com/azr/backoff/linear.go @@ -0,0 +1,44 @@ +package backoff + +// LinearBackOff is a back-off policy whose back-off time is multiplied by mult and incremented by incr +// each time it is called. +// mult can be one ;). +import "time" + +// grows linearly until +type LinearBackOff struct { + InitialInterval time.Duration + Multiplier float64 + Increment time.Duration + MaxInterval time.Duration + currentInterval time.Duration +} + +var _ Interface = (*LinearBackOff)(nil) + +func NewLinear(from, to, incr time.Duration, mult float64) *LinearBackOff { + return &LinearBackOff{ + InitialInterval: from, + MaxInterval: to, + currentInterval: from, + Increment: incr, + Multiplier: mult, + } +} + +func (lb *LinearBackOff) Reset() { + lb.currentInterval = lb.InitialInterval +} + +func (lb *LinearBackOff) increment() { + lb.currentInterval = time.Duration(float64(lb.currentInterval) * lb.Multiplier) + lb.currentInterval += lb.Increment + if lb.currentInterval > lb.MaxInterval { + lb.currentInterval = lb.MaxInterval + } +} + +func (lb *LinearBackOff) BackOff() { + time.Sleep(lb.currentInterval) + lb.increment() +} diff --git a/vendor/github.com/dustin/go-jsonpointer/.gitignore b/vendor/github.com/dustin/go-jsonpointer/.gitignore new file mode 100644 index 0000000..4e8e42f --- /dev/null +++ b/vendor/github.com/dustin/go-jsonpointer/.gitignore @@ -0,0 +1,2 @@ +#* +*~ diff --git a/vendor/github.com/dustin/go-jsonpointer/LICENSE b/vendor/github.com/dustin/go-jsonpointer/LICENSE new file mode 100644 index 0000000..b01ef80 --- /dev/null +++ b/vendor/github.com/dustin/go-jsonpointer/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this 
permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/dustin/go-jsonpointer/README.markdown b/vendor/github.com/dustin/go-jsonpointer/README.markdown new file mode 100644 index 0000000..4153212 --- /dev/null +++ b/vendor/github.com/dustin/go-jsonpointer/README.markdown @@ -0,0 +1,5 @@ +# JSON Pointer for go + +This is an implementation of [JSON Pointer](http://tools.ietf.org/html/rfc6901). + +[![Coverage Status](https://coveralls.io/repos/dustin/go-jsonpointer/badge.png?branch=master)](https://coveralls.io/r/dustin/go-jsonpointer?branch=master) diff --git a/vendor/github.com/dustin/go-jsonpointer/bytes.go b/vendor/github.com/dustin/go-jsonpointer/bytes.go new file mode 100644 index 0000000..4f9e285 --- /dev/null +++ b/vendor/github.com/dustin/go-jsonpointer/bytes.go @@ -0,0 +1,328 @@ +package jsonpointer + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/dustin/gojson" +) + +func arreq(a, b []string) bool { + if len(a) == len(b) { + for i := range a { + if a[i] != b[i] { + return false + } + } + return true + } + + return false +} + +func unescape(s string) string { + n := strings.Count(s, "~") + if n == 0 { + return s + } + + t := make([]byte, len(s)-n+1) // remove one char per ~ + w := 0 + start := 0 + for i := 0; i < n; i++ { + j := start + strings.Index(s[start:], "~") + w += copy(t[w:], s[start:j]) + if len(s) < j+2 { + t[w] = '~' + w++ + break + } + c := s[j+1] + switch c { + case '0': + t[w] = '~' + w++ + case '1': + t[w] = '/' + w++ + default: + t[w] = '~' + w++ + t[w] = c + w++ + } + start = j + 2 + } + w += copy(t[w:], s[start:]) + return string(t[0:w]) +} + +func parsePointer(s string) []string { + a := strings.Split(s[1:], "/") + if !strings.Contains(s, "~") { + return a + } + + for i := range a { + if strings.Contains(a[i], "~") { + a[i] = unescape(a[i]) + } + } + return a +} + +func escape(s string, out []rune) []rune { + for _, c := range s { + switch c { + case '/': + out = append(out, '~', '1') + case '~': + out = append(out, '~', '0') + default: + out = append(out, c) + } + } + return out +} + +func encodePointer(p []string) string { + out := make([]rune, 0, 64) + + for _, s := range p { + out = append(out, '/') + out = escape(s, out) + } + return string(out) +} + +func grokLiteral(b []byte) string { + s, ok := json.UnquoteBytes(b) + if !ok { + panic("could not grok literal " + string(b)) + } + return string(s) +} + +func isSpace(c rune) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' +} + +// FindDecode finds an object by JSONPointer path and then decode the +// result into a user-specified object. Errors if a properly +// formatted JSON document can't be found at the given path. +func FindDecode(data []byte, path string, into interface{}) error { + d, err := Find(data, path) + if err != nil { + return err + } + return json.Unmarshal(d, into) +} + +// Find a section of raw JSON by specifying a JSONPointer. 
+func Find(data []byte, path string) ([]byte, error) { + if path == "" { + return data, nil + } + + needle := parsePointer(path) + + scan := &json.Scanner{} + scan.Reset() + + offset := 0 + beganLiteral := 0 + current := make([]string, 0, 32) + for { + if offset >= len(data) { + break + } + newOp := scan.Step(scan, int(data[offset])) + offset++ + + switch newOp { + case json.ScanBeginArray: + current = append(current, "0") + case json.ScanObjectKey: + current[len(current)-1] = grokLiteral(data[beganLiteral-1 : offset-1]) + case json.ScanBeginLiteral: + beganLiteral = offset + case json.ScanArrayValue: + n := mustParseInt(current[len(current)-1]) + current[len(current)-1] = strconv.Itoa(n + 1) + case json.ScanEndArray, json.ScanEndObject: + current = sliceToEnd(current) + case json.ScanBeginObject: + current = append(current, "") + case json.ScanContinue, json.ScanSkipSpace, json.ScanObjectValue, json.ScanEnd: + default: + return nil, fmt.Errorf("found unhandled json op: %v", newOp) + } + + if (newOp == json.ScanBeginArray || newOp == json.ScanArrayValue || + newOp == json.ScanObjectKey) && arreq(needle, current) { + otmp := offset + for isSpace(rune(data[otmp])) { + otmp++ + } + if data[otmp] == ']' { + // special case an array offset miss + offset = otmp + return nil, nil + } + val, _, err := json.NextValue(data[offset:], scan) + return val, err + } + } + + return nil, nil +} + +func sliceToEnd(s []string) []string { + end := len(s) - 1 + if end >= 0 { + s = s[:end] + } + return s + +} + +func mustParseInt(s string) int { + n, err := strconv.Atoi(s) + if err == nil { + return n + } + panic(err) +} + +// ListPointers lists all possible pointers from the given input. +func ListPointers(data []byte) ([]string, error) { + if len(data) == 0 { + return nil, fmt.Errorf("Invalid JSON") + } + rv := []string{""} + + scan := &json.Scanner{} + scan.Reset() + + offset := 0 + beganLiteral := 0 + var current []string + for { + if offset >= len(data) { + return rv, nil + } + newOp := scan.Step(scan, int(data[offset])) + offset++ + + switch newOp { + case json.ScanBeginArray: + current = append(current, "0") + case json.ScanObjectKey: + current[len(current)-1] = grokLiteral(data[beganLiteral-1 : offset-1]) + case json.ScanBeginLiteral: + beganLiteral = offset + case json.ScanArrayValue: + n := mustParseInt(current[len(current)-1]) + current[len(current)-1] = strconv.Itoa(n + 1) + case json.ScanEndArray, json.ScanEndObject: + current = sliceToEnd(current) + case json.ScanBeginObject: + current = append(current, "") + case json.ScanError: + return nil, fmt.Errorf("Error reading JSON object at offset %v", offset) + } + + if newOp == json.ScanBeginArray || newOp == json.ScanArrayValue || + newOp == json.ScanObjectKey { + rv = append(rv, encodePointer(current)) + } + } +} + +// FindMany finds several jsonpointers in one pass through the input. 
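Under the same import-path assumption, a short sketch of how `ListPointers` and the batch lookup below might be combined; the document shown is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	jsonpointer "github.com/dustin/go-jsonpointer"
)

func main() {
	doc := []byte(`{"name": "ada", "langs": ["go", "ml"]}`)

	// Enumerate every addressable pointer in the document
	// (the empty pointer "" addresses the whole document).
	ptrs, err := jsonpointer.ListPointers(doc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ptrs) // [ /name /langs /langs/0 /langs/1]

	// Fetch several pointers in a single pass over the input;
	// the values come back as raw JSON bytes.
	vals, err := jsonpointer.FindMany(doc, []string{"/name", "/langs/1"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %s\n", vals["/name"], vals["/langs/1"]) // "ada" "ml"
}
```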
+func FindMany(data []byte, paths []string) (map[string][]byte, error) { + tpaths := make([]string, 0, len(paths)) + m := map[string][]byte{} + for _, p := range paths { + if p == "" { + m[p] = data + } else { + tpaths = append(tpaths, p) + } + } + sort.Strings(tpaths) + + scan := &json.Scanner{} + scan.Reset() + + offset := 0 + todo := len(tpaths) + beganLiteral := 0 + matchedAt := 0 + var current []string + for todo > 0 { + if offset >= len(data) { + break + } + newOp := scan.Step(scan, int(data[offset])) + offset++ + + switch newOp { + case json.ScanBeginArray: + current = append(current, "0") + case json.ScanObjectKey: + current[len(current)-1] = grokLiteral(data[beganLiteral-1 : offset-1]) + case json.ScanBeginLiteral: + beganLiteral = offset + case json.ScanArrayValue: + n := mustParseInt(current[len(current)-1]) + current[len(current)-1] = strconv.Itoa(n + 1) + case json.ScanEndArray, json.ScanEndObject: + current = sliceToEnd(current) + case json.ScanBeginObject: + current = append(current, "") + } + + if newOp == json.ScanBeginArray || newOp == json.ScanArrayValue || + newOp == json.ScanObjectKey { + + if matchedAt < len(current)-1 { + continue + } + if matchedAt > len(current) { + matchedAt = len(current) + } + + currentStr := encodePointer(current) + off := sort.SearchStrings(tpaths, currentStr) + if off < len(tpaths) { + // Check to see if the path we're + // going down could even lead to a + // possible match. + if strings.HasPrefix(tpaths[off], currentStr) { + matchedAt++ + } + // And if it's not an exact match, keep parsing. + if tpaths[off] != currentStr { + continue + } + } else { + // Fell of the end of the list, no possible match + continue + } + + // At this point, we have an exact match, so grab it. + stmp := &json.Scanner{} + val, _, err := json.NextValue(data[offset:], stmp) + if err != nil { + return m, err + } + m[currentStr] = val + todo-- + } + } + + return m, nil +} diff --git a/vendor/github.com/dustin/go-jsonpointer/doc.go b/vendor/github.com/dustin/go-jsonpointer/doc.go new file mode 100644 index 0000000..9555258 --- /dev/null +++ b/vendor/github.com/dustin/go-jsonpointer/doc.go @@ -0,0 +1,2 @@ +// Package jsonpointer implements RFC6901 JSON Pointers +package jsonpointer diff --git a/vendor/github.com/dustin/go-jsonpointer/map.go b/vendor/github.com/dustin/go-jsonpointer/map.go new file mode 100644 index 0000000..7a97529 --- /dev/null +++ b/vendor/github.com/dustin/go-jsonpointer/map.go @@ -0,0 +1,38 @@ +package jsonpointer + +import ( + "strconv" + "strings" +) + +// Get the value at the specified path. 
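The map-based variant below operates on an already-decoded document rather than raw bytes. A minimal sketch, again assuming the vendored import paths used in this patch and a made-up document; note that a missing path yields nil rather than an error:

```go
package main

import (
	"fmt"

	jsonpointer "github.com/dustin/go-jsonpointer"
	json "github.com/dustin/gojson"
)

func main() {
	var m map[string]interface{}
	data := []byte(`{"users": [{"name": "ada"}, {"name": "bob"}]}`)
	if err := json.Unmarshal(data, &m); err != nil {
		panic(err)
	}

	// Get walks the decoded structure; array indices are plain decimal segments.
	fmt.Println(jsonpointer.Get(m, "/users/1/name")) // bob

	// A path that does not exist returns nil.
	fmt.Println(jsonpointer.Get(m, "/users/5/name")) // <nil>
}
```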
+func Get(m map[string]interface{}, path string) interface{} { + if path == "" { + return m + } + + parts := strings.Split(path[1:], "/") + var rv interface{} = m + + for _, p := range parts { + switch v := rv.(type) { + case map[string]interface{}: + if strings.Contains(p, "~") { + p = strings.Replace(p, "~1", "/", -1) + p = strings.Replace(p, "~0", "~", -1) + } + rv = v[p] + case []interface{}: + i, err := strconv.Atoi(p) + if err == nil && i < len(v) { + rv = v[i] + } else { + return nil + } + default: + return nil + } + } + + return rv +} diff --git a/vendor/github.com/dustin/go-jsonpointer/reflect.go b/vendor/github.com/dustin/go-jsonpointer/reflect.go new file mode 100644 index 0000000..d5e4da8 --- /dev/null +++ b/vendor/github.com/dustin/go-jsonpointer/reflect.go @@ -0,0 +1,171 @@ +package jsonpointer + +import ( + "reflect" + "strconv" + "strings" +) + +// Reflect gets the value at the specified path from a struct. +func Reflect(o interface{}, path string) interface{} { + if path == "" { + return o + } + + parts := parsePointer(path) + var rv interface{} = o + +OUTER: + for _, p := range parts { + val := reflect.ValueOf(rv) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + if val.Kind() == reflect.Struct { + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + tag := sf.Tag.Get("json") + name := parseJSONTagName(tag) + if (name != "" && name == p) || sf.Name == p { + rv = val.Field(i).Interface() + continue OUTER + } + } + // Found no matching field. + return nil + } else if val.Kind() == reflect.Map { + // our pointer always gives us a string key + // here we try to convert it into the correct type + mapKey, canConvert := makeMapKeyFromString(val.Type().Key(), p) + if canConvert { + field := val.MapIndex(mapKey) + if field.IsValid() { + rv = field.Interface() + } else { + return nil + } + } else { + return nil + } + } else if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + i, err := strconv.Atoi(p) + if err == nil && i < val.Len() { + rv = val.Index(i).Interface() + } else { + return nil + } + } else { + return nil + } + } + + return rv +} + +// ReflectListPointers lists all possible pointers from the given struct. +func ReflectListPointers(o interface{}) ([]string, error) { + return reflectListPointersRecursive(o, ""), nil +} + +func reflectListPointersRecursive(o interface{}, prefix string) []string { + rv := []string{prefix + ""} + + val := reflect.ValueOf(o) + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + + if val.Kind() == reflect.Struct { + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + child := val.Field(i).Interface() + sf := typ.Field(i) + tag := sf.Tag.Get("json") + name := parseJSONTagName(tag) + if name != "" { + // use the tag name + childReults := reflectListPointersRecursive(child, prefix+encodePointer([]string{name})) + rv = append(rv, childReults...) + } else { + // use the original field name + childResults := reflectListPointersRecursive(child, prefix+encodePointer([]string{sf.Name})) + rv = append(rv, childResults...) + } + } + + } else if val.Kind() == reflect.Map { + for _, k := range val.MapKeys() { + child := val.MapIndex(k).Interface() + mapKeyName := makeMapKeyName(k) + childReults := reflectListPointersRecursive(child, prefix+encodePointer([]string{mapKeyName})) + rv = append(rv, childReults...) 
+ } + } else if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + for i := 0; i < val.Len(); i++ { + child := val.Index(i).Interface() + childResults := reflectListPointersRecursive(child, prefix+encodePointer([]string{strconv.Itoa(i)})) + rv = append(rv, childResults...) + } + } + return rv +} + +// makeMapKeyName takes a map key value and creates a string representation +func makeMapKeyName(v reflect.Value) string { + switch v.Kind() { + case reflect.Float32, reflect.Float64: + fv := v.Float() + return strconv.FormatFloat(fv, 'f', -1, v.Type().Bits()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + iv := v.Int() + return strconv.FormatInt(iv, 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + iv := v.Uint() + return strconv.FormatUint(iv, 10) + default: + return v.String() + } +} + +// makeMapKeyFromString takes the key type for a map, and a string +// representing the key, it then tries to convert the string +// representation into a value of the correct type. +func makeMapKeyFromString(mapKeyType reflect.Type, pointer string) (reflect.Value, bool) { + valp := reflect.New(mapKeyType) + val := reflect.Indirect(valp) + switch mapKeyType.Kind() { + case reflect.String: + return reflect.ValueOf(pointer), true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + iv, err := strconv.ParseInt(pointer, 10, mapKeyType.Bits()) + if err == nil { + val.SetInt(iv) + return val, true + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + iv, err := strconv.ParseUint(pointer, 10, mapKeyType.Bits()) + if err == nil { + val.SetUint(iv) + return val, true + } + case reflect.Float32, reflect.Float64: + fv, err := strconv.ParseFloat(pointer, mapKeyType.Bits()) + if err == nil { + val.SetFloat(fv) + return val, true + } + } + + return reflect.ValueOf(nil), false +} + +// parseJSONTagName extracts the JSON field name from a struct tag +func parseJSONTagName(tag string) string { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx] + } + return tag +} diff --git a/vendor/github.com/dustin/gojson/.gitignore b/vendor/github.com/dustin/gojson/.gitignore new file mode 100644 index 0000000..4e8e42f --- /dev/null +++ b/vendor/github.com/dustin/gojson/.gitignore @@ -0,0 +1,2 @@ +#* +*~ diff --git a/vendor/github.com/dustin/gojson/LICENSE b/vendor/github.com/dustin/gojson/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/dustin/gojson/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dustin/gojson/decode.go b/vendor/github.com/dustin/gojson/decode.go new file mode 100644 index 0000000..cf467d5 --- /dev/null +++ b/vendor/github.com/dustin/gojson/decode.go @@ -0,0 +1,1089 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshalling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. 
+ // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.Reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan Scanner + nextscan Scanner // for calls to NextValue + savedError error + useNumber bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. 
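Before the decoder internals, a quick sketch of the Unmarshal behaviour documented above (struct fields matched by tag or case-insensitively by name, untyped targets receiving map/float64 values). The struct and data are hypothetical; the import path follows this repository's vendored layout:

```go
package main

import (
	"fmt"

	json "github.com/dustin/gojson"
)

type User struct {
	Name string `json:"name"`
	Age  int    // matched case-insensitively against "age"
}

func main() {
	data := []byte(`{"name": "ada", "age": 36}`)

	// Decoding into a struct uses the json tag first, then a
	// case-insensitive match on the field name.
	var u User
	if err := json.Unmarshal(data, &u); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", u) // {Name:ada Age:36}

	// Decoding into an empty interface yields map[string]interface{}
	// with float64 numbers, as described in the Unmarshal documentation.
	var v interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", v.(map[string]interface{})["age"]) // float64
}
```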
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := NextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.Step(&d.scan, '}') + } else { + d.scan.Step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.EOF() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := int(d.data[d.off]) + d.off++ + newOp = d.scan.Step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := NextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.Step = stateBeginValue + } + d.scan.Step(&d.scan, '"') + d.scan.Step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.Step(&d.scan, ':') + d.scan.Step(&d.scan, '"') + d.scan.Step(&d.scan, '"') + d.scan.Step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(ScanSkipSpace); op { + default: + d.error(errPhase) + + case ScanBeginArray: + d.array(v) + + case ScanBeginObject: + d.object(v) + + case ScanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(ScanSkipSpace); op { + default: + d.error(errPhase) + + case ScanBeginArray: + d.array(reflect.Value{}) + + case ScanBeginObject: + d.object(reflect.Value{}) + + case ScanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. 
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type()}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type()}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(ScanSkipSpace) + if op == ScanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(ScanSkipSpace) + if op == ScanEndArray { + break + } + if op != ScanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type()}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type()}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type()}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(ScanSkipSpace) + if op == ScanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != ScanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(ScanContinue) + item := d.data[start : d.off-1] + key, ok := UnquoteBytes(item) + if !ok { + d.error(errPhase) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == ScanSkipSpace { + op = d.scanWhile(ScanSkipSpace) + } + if op != ScanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", item, v.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(ScanSkipSpace) + if op == ScanEndObject { + break + } + if op != ScanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(ScanContinue) + + // Scan read one byte too far; back up. 
+ d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. + if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + } + } + s, ok := UnquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type()}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type()}) + } + } + + case '"': // string + s, ok := UnquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + case reflect.Slice: + if v.Type() != byteSliceType { + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.Set(reflect.ValueOf(b[0:n])) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type()}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := 
string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type()}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type()}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type()}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(ScanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case ScanBeginArray: + return d.arrayInterface() + case ScanBeginObject: + return d.objectInterface() + case ScanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(ScanSkipSpace) + if op == ScanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(ScanSkipSpace) + if op == ScanEndArray { + break + } + if op != ScanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + op := d.scanWhile(ScanSkipSpace) + if op == ScanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != ScanBeginLiteral { + d.error(errPhase) + } + + // Read string key. + start := d.off - 1 + op = d.scanWhile(ScanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Read : before value. + if op == ScanSkipSpace { + op = d.scanWhile(ScanSkipSpace) + } + if op != ScanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(ScanSkipSpace) + if op == ScanEndObject { + break + } + if op != ScanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. 
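The `,string` tag handling implemented by the valueQuoted/literalStore path above can be exercised end to end with a small sketch (vendored import path assumed; the struct and values are illustrative only):

```go
package main

import (
	"fmt"

	json "github.com/dustin/gojson"
)

type Account struct {
	// The ",string" option tells the decoder that the number arrives
	// wrapped in a JSON string; literalStore unwraps it via fromQuoted.
	ID int64 `json:"id,string"`
}

func main() {
	var a Account
	if err := json.Unmarshal([]byte(`{"id": "9007199254740993"}`), &a); err != nil {
		panic(err)
	}
	fmt.Println(a.ID) // 9007199254740993

	// Without the string wrapper the same field is rejected with a
	// ",string struct tag" error.
	err := json.Unmarshal([]byte(`{"id": 42}`), &a)
	fmt.Println(err != nil) // true
}
```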
+func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(ScanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = UnquoteBytes(s) + t = string(s) + return +} + +func UnquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + s = bytes.TrimSpace(s) + + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + } + + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/dustin/gojson/encode.go b/vendor/github.com/dustin/gojson/encode.go new file mode 100644 index 0000000..fca2a09 --- /dev/null +++ b/vendor/github.com/dustin/gojson/encode.go @@ -0,0 +1,1183 @@ +// Copyright 2010 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// http://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// or integer types. This extra level of encoding is sometimes used when +// communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. 
+// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the object keys are used directly +// as map keys. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML